Merge branch 'timers/urgent' into timers/core
Pick up dependent changes.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 15f79c2..7737ab5 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -866,6 +866,15 @@
dscc4.setup= [NET]
+ dt_cpu_ftrs= [PPC]
+ Format: {"off" | "known"}
+ Control how the dt_cpu_ftrs device-tree binding is
+ used for CPU feature discovery and setup (if it
+ exists).
+			off: Do not use it; fall back to the legacy CPU table.
+ known: Do not pass through unknown features to guests
+ or userspace, only those that the kernel is aware of.
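+			For example, booting with dt_cpu_ftrs=known uses the
+			binding but exposes only kernel-known features to
+			guests and userspace.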
+
dump_apple_properties [X86]
Dump name and content of EFI device properties on
x86 Macs. Useful for driver authors to determine
@@ -3802,6 +3811,13 @@
expediting. Set to zero to disable automatic
expediting.
+ stack_guard_gap= [MM]
+			Override the default stack gap protection. The value
+			is in page units and defines how many pages before
+			(for stacks growing down) or after (for stacks growing
+			up) the main stack are reserved for no other mapping.
+			The default value is 256 pages.
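+			For example, stack_guard_gap=1024 reserves a gap of
+			1024 pages (4MB with 4KB pages) instead of the
+			default 256.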
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7ef9dbb..1d4d0f4 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -26,6 +26,10 @@
- interrupt-controller : Indicates the switch is itself an interrupt
controller. This is used for the PHY interrupts.
#interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length : Set to the length of an EEPROM connected to the
+                  switch. Must be set if the switch cannot detect
+ the presence and/or size of a connected EEPROM,
+ otherwise optional.
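+                  For example (illustrative value), a 512 byte EEPROM
+                  would be described with: eeprom-length = <512>;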
- mdio : Container of PHY and devices on the switches MDIO
bus.
- mdio? : Container of PHYs and devices on the external MDIO
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 00bea03..fcf199b 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -10,6 +10,7 @@
- "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
- "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
- "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
+ - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
- "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
- "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
- "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644
index 0000000..76e016d
--- /dev/null
+++ b/Documentation/networking/dpaa.txt
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+ - DPAA Ethernet Overview
+ - DPAA Ethernet Supported SoCs
+ - Configuring DPAA Ethernet in your kernel
+ - DPAA Ethernet Frame Processing
+ - DPAA Ethernet Features
+ - Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+ drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+ drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+ drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+ dpaa_eth /eth0\ ... /ethN\
+ driver | | | |
+ ------------- ---- ----------- ---- -------------
+ -Ports / Tx Rx \ ... / Tx Rx \
+ FMan | | | |
+ -MACs | MAC0 | | MACN |
+ / dtsec0 \ ... / dtsecN \ (or tgec)
+ / \ / \(or memac)
+ --------- -------------- --- -------------- ---------
+ FMan, FMan Port, FMan SP, FMan MURAM drivers
+ ---------------------------------------------------------
+ FMan HW blocks: MURAM, MACs, Ports, SP
+ ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+ ________________________________
+ dpaa_eth / eth0 \
+ driver / \
+ --------- -^- -^- -^- --- ---------
+ QMan driver / \ / \ / \ \ / | BMan |
+ |Rx | |Rx | |Tx | |Tx | | driver |
+ --------- |Dfl| |Err| |Cnf| |FQs| | |
+ QMan HW |FQ | |FQ | |FQs| | | | |
+ / \ / \ / \ \ / | |
+ --------- --- --- --- -v- ---------
+ | FMan QMI | |
+ | FMan HW FMan BMI | BMan HW |
+ ----------------------- --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffer Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000 Mbps)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffer pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPUS Tx queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. skb priority levels are mapped to traffic classes as follows:
+
+ * priorities 0 to 3 - traffic class 0 (low priority)
+ * priorities 4 to 7 - traffic class 1 (medium-low priority)
+ * priorities 8 to 11 - traffic class 2 (medium-high priority)
+ * priorities 12 to 15 - traffic class 3 (high priority)
+
+For example, all four traffic classes can be enabled on an interface with the
+following command:
+
+tc qdisc add dev <int> root handle 1: \
+	 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
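+
+As an illustrative sketch (not part of the driver), a userspace application
+can steer its traffic into traffic class 3 by setting its socket priority to
+a value in the 12-15 range via setsockopt(2), where fd is an already open
+socket:
+
+	int prio = 12;	/* maps to traffic class 3 (high priority) */
+	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));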
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+ - interrupt count per CPU
+ - Rx packets count per CPU
+ - Tx packets count per CPU
+ - Tx confirmed packets count per CPU
+ - Tx S/G frames count per CPU
+ - Tx error count per CPU
+ - Rx error count per CPU
+ - Rx error count per type
+ - congestion related statistics:
+ - congestion status
+ - time spent in congestion
+      - number of times the device entered congestion
+ - dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+ - the FQ IDs for each FQ type
+ /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+ - the IDs of the buffer pools in use
+ /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
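+
+For example (illustrative commands, assuming the interface is named eth0):
+
+	ethtool -S eth0
+	cat /sys/devices/platform/dpaa-ethernet.0/net/eth0/fqids
+	cat /sys/devices/platform/dpaa-ethernet.0/net/eth0/bpids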
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 59f4db2..f55639d 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -122,7 +122,7 @@
or will be computed in the stack. Capable hardware can pass the hash in
the receive descriptor for the packet; this would usually be the same
hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
-skb->rx_hash and can be used elsewhere in the stack as a hash of the
+skb->hash and can be used elsewhere in the stack as a hash of the
packet’s flow.
Each receive hardware queue has an associated list of CPUs to which
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index bdc4c0d..9c7139d 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -1,7 +1,7 @@
TCP protocol
============
-Last updated: 9 February 2008
+Last updated: 3 June 2017
Contents
========
@@ -29,18 +29,19 @@
A congestion control mechanism can be registered through functions in
tcp_cong.c. The functions used by the congestion control mechanism are
registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
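+
+As an illustrative sketch only (the "example" name is made up, the reno
+helpers are the ones exported by the kernel, and the usual <net/tcp.h> and
+<linux/module.h> includes are assumed), a minimal registration could look
+like:
+
+	static struct tcp_congestion_ops tcp_example __read_mostly = {
+		.name		= "example",
+		.owner		= THIS_MODULE,
+		.ssthresh	= tcp_reno_ssthresh,	/* reno slow start threshold */
+		.cong_avoid	= tcp_reno_cong_avoid,	/* reno congestion avoidance */
+		.undo_cwnd	= tcp_reno_undo_cwnd,	/* reno cwnd undo */
+	};
+
+	static int __init tcp_example_register(void)
+	{
+		return tcp_register_congestion_control(&tcp_example);
+	}
+	module_init(tcp_example_register);
+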
Private data for a congestion control mechanism is stored in tp->ca_priv.
tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
be stored here.
There are three kinds of congestion control algorithms currently: The
simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
ones like BIC try to look at other events to provide better
heuristics. There are also round trip time based algorithms like
Vegas and Westwood+.
@@ -49,21 +50,15 @@
needs to maintain fairness and performance. Please review current
research and RFC's before developing new modules.
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
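+
+For example, the default can be overridden at runtime with:
+
+	sysctl -w net.ipv4.tcp_congestion_control=cubic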
-If you really want a particular default value then you will need
-to set it with the sysctl. If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
How the new TCP output machine [nyi] works.
===========================================
diff --git a/MAINTAINERS b/MAINTAINERS
index 053c3bd..09b5ab6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1172,7 +1172,7 @@
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
-M: Ryan Mallon <rmallon@gmail.com>
+M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: arch/arm/mach-mvebu/
-F: drivers/rtc/rtc-armada38x.c
F: arch/arm/boot/dts/armada*
F: arch/arm/boot/dts/kirkwood*
+F: arch/arm/configs/mvebu_*_defconfig
+F: arch/arm/mach-mvebu/
F: arch/arm64/boot/dts/marvell/armada*
F: drivers/cpufreq/mvebu-cpufreq.c
-F: arch/arm/configs/mvebu_*_defconfig
+F: drivers/irqchip/irq-armada-370-xp.c
+F: drivers/irqchip/irq-mvebu-*
+F: drivers/rtc/rtc-armada38x.c
ARM/Marvell Berlin SoC support
M: Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
-R: Javier Martinez Canillas <javier@osg.samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@
ARM/STI ARCHITECTURE
M: Patrice Chotard <patrice.chotard@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: kernel@stlinux.com
W: http://www.stlinux.com
S: Maintained
F: arch/arm/mach-sti/
@@ -5622,7 +5622,7 @@
GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com>
-M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
S: Supported
F: drivers/misc/genwqe/
@@ -5667,7 +5667,6 @@
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
-M: Alexandre Courbot <gnurou@gmail.com>
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
@@ -7707,7 +7706,7 @@
LIVE PATCHING
M: Josh Poimboeuf <jpoimboe@redhat.com>
-M: Jessica Yu <jeyu@redhat.com>
+M: Jessica Yu <jeyu@kernel.org>
M: Jiri Kosina <jikos@kernel.org>
M: Miroslav Benes <mbenes@suse.cz>
R: Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8507,7 @@
F: drivers/media/radio/radio-miropcm20*
MELLANOX MLX4 core VPI driver
-M: Yishai Hadas <yishaih@mellanox.com>
+M: Tariq Toukan <tariqt@mellanox.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
W: http://www.mellanox.com
@@ -8516,7 +8515,6 @@
S: Supported
F: drivers/net/ethernet/mellanox/mlx4/
F: include/linux/mlx4/
-F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX4 IB driver
M: Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8524,7 @@
S: Supported
F: drivers/infiniband/hw/mlx4/
F: include/linux/mlx4/
+F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX5 core VPI driver
M: Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8537,6 @@
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
-F: include/uapi/rdma/mlx5-abi.h
MELLANOX MLX5 IB driver
M: Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8547,7 @@
S: Supported
F: drivers/infiniband/hw/mlx5/
F: include/linux/mlx5/
+F: include/uapi/rdma/mlx5-abi.h
MELEXIS MLX90614 DRIVER
M: Crt Mori <cmo@melexis.com>
@@ -8588,7 +8587,7 @@
F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
-M: Jessica Yu <jeyu@redhat.com>
+M: Jessica Yu <jeyu@kernel.org>
M: Rusty Russell <rusty@rustcorp.com.au>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
S: Maintained
@@ -10450,7 +10449,7 @@
PXA RTC DRIVER
M: Robert Jarzmik <robert.jarzmik@free.fr>
-L: rtc-linux@googlegroups.com
+L: linux-rtc@vger.kernel.org
S: Maintained
QAT DRIVER
@@ -10757,7 +10756,7 @@
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L: rtc-linux@googlegroups.com
+L: linux-rtc@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/rtc-linux/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
S: Maintained
@@ -11268,7 +11267,6 @@
STI CEC DRIVER
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L: kernel@stlinux.com
S: Maintained
F: drivers/staging/media/st-cec/
F: Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11776,7 @@
S: Supported
F: arch/arm/mach-davinci/
F: drivers/i2c/busses/i2c-davinci.c
+F: arch/arm/boot/dts/da850*
TI DAVINCI SERIES MEDIA DRIVER
M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13861,7 +13860,7 @@
F: drivers/net/wireless/wl3501*
WOLFSON MICROELECTRONICS DRIVERS
-L: patches@opensource.wolfsonmicro.com
+L: patches@opensource.cirrus.com
T: git https://github.com/CirrusLogic/linux-drivers.git
W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
diff --git a/Makefile b/Makefile
index 470bd4d..e40c471 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
NAME = Fearless Coyote
# *DOCUMENTATION*
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3e25e8d..2e13683 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -65,7 +65,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 9d5dc4f..3f7d1b7 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,14 +17,12 @@
@ there.
.inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
#else
- mov r0, r0
+ W(mov) r0, r0
#endif
.endm
.macro __EFI_HEADER
#ifdef CONFIG_EFI_STUB
- b __efi_start
-
.set start_offset, __efi_start - start
.org start + 0x3c
@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7c711ba..8a75687 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,19 +130,22 @@
.rept 7
__nop
.endr
- ARM( mov r0, r0 )
- ARM( b 1f )
- THUMB( badr r12, 1f )
- THUMB( bx r12 )
+#ifndef CONFIG_THUMB2_KERNEL
+ mov r0, r0
+#else
+ AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
+ M_CLASS( nop.w ) @ M: already in Thumb2 mode
+ .thumb
+#endif
+ W(b) 1f
.word _magic_sig @ Magic numbers to help the loader
.word _magic_start @ absolute load/run zImage address
.word _magic_end @ zImage end address
.word 0x04030201 @ endianness flag
- THUMB( .thumb )
-1: __EFI_HEADER
-
+ __EFI_HEADER
+1:
ARM_BE8( setend be ) @ go BE8 if compiled for BE8
AR_CLASS( mrs r9, cpsr )
#ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index c5d2589..fc864a8 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -220,7 +220,7 @@
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
>;
};
@@ -280,10 +280,6 @@
AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */
AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
- /* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
/* PDI Bus - Battery system */
AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
@@ -384,7 +380,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
bus-width = <4>;
- cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vmmcsd_fixed>;
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 561f27d..9444a9a 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,11 @@
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
/* This include file covers the common peripherals and configuration between
* bcm2835 and bcm2836 implementations, leaving the CPU configuration to
* bcm2835.dtsi and bcm2836.dtsi.
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1..d2be8aa 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@
ethphy0: ethernet-phy@2 {
reg = <2>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
};
ethphy1: ethernet-phy@1 {
reg = <1>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+ clock-names = "rmii-ref";
};
};
};
diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
index b6f2682..66f615a 100644
--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
@@ -137,8 +137,8 @@
/* NetCP address range */
ranges = <0 0x26000000 0x1000000>;
- clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
- clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+ clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+ clock-names = "pa_clk", "ethss_clk", "cpts";
dma-coherent;
ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index b58e7eb..1486504 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -232,6 +232,14 @@
};
};
+ osr: sram@70000000 {
+ compatible = "mmio-sram";
+ reg = <0x70000000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&clkosr>;
+ };
+
dspgpio0: keystone_dsp_gpio@02620240 {
compatible = "ti,keystone-dsp-gpio";
gpio-controller;
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 1aeeacb..d4f600d 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -558,10 +558,11 @@
};
r_ccu: clock@1f01400 {
- compatible = "allwinner,sun50i-a64-r-ccu";
+ compatible = "allwinner,sun8i-h3-r-ccu";
reg = <0x01f01400 0x100>;
- clocks = <&osc24M>, <&osc32k>, <&iosc>;
- clock-names = "hosc", "losc", "iosc";
+ clocks = <&osc24M>, <&osc32k>, <&iosc>,
+ <&ccu 9>;
+ clock-names = "hosc", "losc", "iosc", "pll-periph";
#clock-cells = <1>;
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb2..06e2331 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
/ {
model = "ARM Versatile PB";
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index cf06247..2b913f1 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -235,7 +235,7 @@
return ret;
}
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
void mcpm_cpu_power_down(void)
{
@@ -300,7 +300,7 @@
* on the CPU.
*/
phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
- phys_reset(__pa_symbol(mcpm_entry_point));
+ phys_reset(__pa_symbol(mcpm_entry_point), false);
/* should never get here */
BUG();
@@ -389,7 +389,7 @@
__mcpm_cpu_down(cpu, cluster);
phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
- phys_reset(__pa_symbol(mcpm_entry_point));
+ phys_reset(__pa_symbol(mcpm_entry_point), false);
BUG();
}
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 36ec9c8..3234fe9 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -19,7 +19,8 @@
#ifdef CONFIG_XEN
const struct dma_map_ops *dev_dma_ops;
#endif
- bool dma_coherent;
+ unsigned int dma_coherent:1;
+ unsigned int dma_ops_setup:1;
};
struct omap_device;
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 302240c..a0d726a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -66,6 +66,7 @@
#define pgprot_noncached(prot) (prot)
#define pgprot_writecombine(prot) (prot)
#define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot) (prot)
/*
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a..5386528 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
- @ - Memory alignment checks: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCR
ldr r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
+ ARM( ldr r2, =(HSCTLR_M) )
+ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
mcr p15, 4, r0, c1, c0, 0 @ HSCR
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 841e924..cbd959b 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,6 +1,7 @@
menuconfig ARCH_AT91
bool "Atmel SoCs"
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+ select ARM_CPU_SUSPEND if PM
select COMMON_CLK_AT91
select GPIOLIB
select PINCTRL
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index efb8035..b5cc05d 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -153,7 +153,8 @@
davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
if (!davinci_sram_suspend) {
pr_err("PM: cannot allocate SRAM memory\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto no_sram_mem;
}
davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@
suspend_set_ops(&davinci_pm_ops);
+ return 0;
+
+no_sram_mem:
+ iounmap(pm_config.ddrpsc_reg_base);
no_ddrpsc_mem:
iounmap(pm_config.ddrpll_reg_base);
no_ddrpll_mem:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c742dfd..bd83c53 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2311,7 +2311,14 @@
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
@@ -2324,22 +2331,10 @@
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
to_dma_iommu_mapping(dev) = NULL;
+ set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
-
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
- __arm_iommu_detach_device(dev);
- set_dma_ops(dev, NULL);
-}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@
if (!mapping)
return;
- __arm_iommu_detach_device(dev);
+ arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
@@ -2430,9 +2425,13 @@
dev->dma_ops = xen_dma_ops;
}
#endif
+ dev->archdata.dma_ops_setup = true;
}
void arch_teardown_dma_ops(struct device *dev)
{
+ if (!dev->archdata.dma_ops_setup)
+ return;
+
arm_teardown_iommu_dma_ops(dev);
}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2239fde..f0701d8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -90,7 +90,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -141,7 +141,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3dcd7ec..b2024db 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1084,10 +1084,6 @@
def_bool y
depends on COMPAT && SYSVIPC
-config KEYS_COMPAT
- def_bool y
- depends on COMPAT && KEYS
-
endmenu
menu "Power management options"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index c7f669f..166c9ef 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -406,8 +406,9 @@
r_ccu: clock@1f01400 {
compatible = "allwinner,sun50i-a64-r-ccu";
reg = <0x01f01400 0x100>;
- clocks = <&osc24M>, <&osc32k>, <&iosc>;
- clock-names = "hosc", "losc", "iosc";
+ clocks = <&osc24M>, <&osc32k>, <&iosc>,
+ <&ccu 11>;
+ clock-names = "hosc", "losc", "iosc", "pll-periph";
#clock-cells = <1>;
#reset-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 4d314a2..732e2e0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -40,7 +40,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>
/ {
cpus {
diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
deleted file mode 120000
index 036f01d..0000000
--- a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df52..b4bc42e 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -231,8 +231,7 @@
cpm_crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
- interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
- | IRQ_TYPE_LEVEL_HIGH)>,
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75..6e20588 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -221,8 +221,7 @@
cps_crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
- interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
- | IRQ_TYPE_LEVEL_HIGH)>,
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 65cdd87..97c123e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -68,6 +68,7 @@
CONFIG_PCIE_ARMADA_8K=y
CONFIG_PCI_AARDVARK=y
CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_XGENE=y
CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@
CONFIG_WL18XX=m
CONFIG_WLCORE_SDIO=m
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@
CONFIG_SPI_ORION=y
CONFIG_SPI_PL022=y
CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
@@ -292,6 +296,7 @@
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@
CONFIG_BCM2835_WDT=y
CONFIG_MFD_CROS_EC=y
CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
CONFIG_MFD_EXYNOS_LPASS=m
CONFIG_MFD_HI655X_PMIC=y
CONFIG_MFD_MAX77620=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK808=y
CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@
CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
CONFIG_PWM=y
CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
CONFIG_PWM_MESON=m
CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@
CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_ROCKCHIP_INNO_USB2=y
CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_XGENE=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_ARM_SCPI_PROTOCOL=y
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142c..b4d13d9 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
#define SCTLR_ELx_A (1 << 1)
#define SCTLR_ELx_M 1
+#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
+ (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+ (1 << 28) | (1 << 29))
+
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 41b6e31..d0cb007 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -221,10 +221,11 @@
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_time.tv_sec;
- vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+ vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
+ tk->tkr_raw.shift) +
+ tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- /* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b467..76320e9 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@
seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
- lsl x14, x14, x12
get_nsec_per_sec res=x9
lsl x9, x9, x12
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c..3f96155 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@
tlbi alle2
dsb sy
- mrs x4, sctlr_el2
- and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
- ldr x5, =SCTLR_ELx_FLAGS
- orr x4, x4, x5
+ /*
+ * Preserve all the RES1 bits while setting the default flags,
+ * as well as the EE bit on BE. Drop the A flag since the compiler
+ * is allowed to generate unaligned accesses.
+ */
+ ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
msr sctlr_el2, x4
isb
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e3..6260b69 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@
* Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
* The vgic_set_vmcr() will convert to ICH_VMCR layout.
*/
- vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
- vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+ vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+ vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
val = 0;
@@ -83,8 +83,8 @@
* The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
* Extract it directly using ICC_CTLR_EL1 reg definitions.
*/
- val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
- val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+ val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+ val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
p->regval = val;
}
@@ -135,7 +135,7 @@
p->regval = 0;
vgic_get_vmcr(vcpu, &vmcr);
- if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+ if (!vmcr.cbpr) {
if (p->is_write) {
vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
ICC_BPR1_EL1_SHIFT;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 71f9305..c870d6f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -36,6 +36,7 @@
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
@@ -57,6 +58,7 @@
/* temporary registers for internal BPF JIT */
[TMP_REG_1] = A64_R(10),
[TMP_REG_2] = A64_R(11),
+ [TMP_REG_3] = A64_R(12),
/* tail_call_cnt */
[TCALL_CNT] = A64_R(26),
/* temporary register for blinding constants */
@@ -319,6 +321,7 @@
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 tmp3 = bpf2a64[TMP_REG_3];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -689,10 +692,10 @@
emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
emit(A64_LDXR(isdw, tmp2, tmp), ctx);
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
- emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+ emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
- emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
break;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index da82c25..46aa289 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -75,7 +75,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index ec90afd..c599eb1 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -37,15 +37,14 @@
long uncleared;
while (count > PAGE_SIZE) {
- uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
- PAGE_SIZE);
+ uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
if (uncleared)
return count - (PAGE_SIZE - uncleared);
count -= PAGE_SIZE;
dest += PAGE_SIZE;
}
if (count)
- count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+ count = raw_copy_to_user(dest, &empty_zero_page, count);
return count;
}
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a..145b5ce 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@
-DADDR_BITS=$(ADDR_BITS) \
-DADDR_CELLS=$(itb_addr_cells)
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
quiet_cmd_itb-image = ITB $@
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index d34536e..279b6d1 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -35,7 +35,12 @@
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
#define LAST_PKMAP 1024
+#endif
+
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 291846d..ad1a999 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -43,7 +43,8 @@
#define flush_insn_slot(p) \
do { \
- flush_icache_range((unsigned long)p->addr, \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
(unsigned long)p->addr + \
(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
} while (0)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6f94bed..74afe8c 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -19,6 +19,10 @@
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
extern int temp_tlb_entry;
/*
@@ -62,7 +66,8 @@
#define VMALLOC_START MAP_BASE
-#define PKMAP_BASE (0xfe000000UL)
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index b11facd..f702a45 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@
break;
}
/* Compact branch: BNEZC || JIALC */
- if (insn.i_format.rs)
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
regs->regs[31] = epc + 4;
+ }
regs->cp0_epc += 8;
break;
#endif
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 30a3b75..9d9b8fba 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -38,20 +38,6 @@
#endif
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
- if (ip >= (unsigned long)_stext &&
- ip <= (unsigned long)_etext)
- return 1;
- return 0;
-}
-
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@
* If ip is in kernel space, no long call, otherwise, long call is
* needed.
*/
- new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+ new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
@@ -218,12 +204,12 @@
unsigned int new;
unsigned long ip = rec->ip;
- new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+ new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
- return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+ return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
INSN_NOP : insn_la_mcount[1]);
#endif
}
@@ -289,7 +275,7 @@
* instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
* kernel, move after the instruction "move ra, at"(offset is 16)
*/
- ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+ ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
/*
* search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@
* entries configured through the tracing/set_graph_function interface.
*/
- insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
/* Only trace if the calling function expects to */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 313a88b..f3e301f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1597,7 +1597,6 @@
break;
case CPU_P5600:
case CPU_P6600:
- case CPU_I6400:
/* 8-bit event numbers */
raw_id = config & 0x1ff;
base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@
raw_event.range = P;
#endif
break;
+ case CPU_I6400:
+ /* 8-bit event numbers */
+ base_id = config & 0xff;
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 64dd8bd..28adeab 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -93,7 +93,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911..b19a3c5 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@
/*
* Fixed mappings:
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = pud_offset(pgd, vaddr);
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index e528863..378a754 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -90,7 +90,7 @@
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
@@ -143,7 +144,7 @@
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
@@ -177,9 +178,11 @@
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f7c8f99..bf4391d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -380,22 +380,6 @@
menu "Kernel options"
-config PPC_DT_CPU_FTRS
- bool "Device-tree based CPU feature discovery & setup"
- depends on PPC_BOOK3S_64
- default n
- help
- This enables code to use a new device tree binding for describing CPU
- compatibility and features. Saying Y here will attempt to use the new
- binding if the firmware provides it. Currently only the skiboot
- firmware provides this binding.
- If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
- bool "cpufeatures pass through unknown features to guest/userspace"
- depends on PPC_DT_CPU_FTRS
- default y
-
config HIGHMEM
bool "High memory support"
depends on PPC32
@@ -1215,11 +1199,6 @@
source "security/Kconfig"
-config KEYS_COMPAT
- bool
- depends on COMPAT && KEYS
- default y
-
source "crypto/Kconfig"
config PPC_LIB_RHEAP
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b..0c4e470 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
#define H_PTE_INDEX_SIZE 9
#define H_PMD_INDEX_SIZE 7
#define H_PUD_INDEX_SIZE 9
-#define H_PGD_INDEX_SIZE 12
+#define H_PGD_INDEX_SIZE 9
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a..0151af6 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
"1: "PPC_TLNEI" %4,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_TAINT(TAINT_WARN)), \
+ "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
"i" (sizeof(struct bug_entry)), \
"r" (__ret_warn_on)); \
} \
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c2d5095..d02ad93 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -214,7 +214,6 @@
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
- CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a2123f2..bb99b65 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@
#define TASK_SIZE_128TB (0x0000800000000000UL)
#define TASK_SIZE_512TB (0x0002000000000000UL)
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
/*
* Max value currently used:
*/
-#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
#else
-#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
#endif
/*
@@ -132,7 +137,7 @@
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@
* with 128TB and conditionally enable upto 512TB
*/
#ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
- TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
+ TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#endif
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32
#define STACK_TOP (is_32bit_task() ? \
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b..3297715 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+static inline int early_cpu_to_node(int cpu)
+{
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * Fall back to node 0 if nid is unset (it should be, except bugs).
+ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+ */
+ return (nid < 0) ? 0 : nid;
+}
#else
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
static inline void dump_numa_cpu_topology(void) {}
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index c8a822a..c23ff43 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,11 +94,13 @@
* store at 0 and some ESBs support doing a trigger via a
* separate trigger page.
*/
-#define XIVE_ESB_GET 0x800
-#define XIVE_ESB_SET_PQ_00 0xc00
-#define XIVE_ESB_SET_PQ_01 0xd00
-#define XIVE_ESB_SET_PQ_10 0xe00
-#define XIVE_ESB_SET_PQ_11 0xf00
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
+#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index fcc7588..4c7656d 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
+#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
@@ -642,7 +643,6 @@
{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
- {"subcore", feat_enable, CPU_FTR_SUBCORE},
{"no-execute", feat_enable, 0},
{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@
{"wait-v3", feat_enable, 0},
};
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+ if (!str)
+ return 0;
+
+ if (!strcmp(str, "off"))
+ using_dt_cpu_ftrs = false;
+ else if (!strcmp(str, "known"))
+ enable_unknown = false;
+ else
+ return 1;
+
+ return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
static void __init cpufeatures_setup_start(u32 isa)
{
@@ -707,7 +719,7 @@
}
}
- if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+ if (!known && enable_unknown) {
if (!feat_try_enable_unknown(f)) {
pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
f->name);
@@ -756,6 +768,26 @@
cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
+static int __init disabled_on_cmdline(void)
+{
+ unsigned long root, chosen;
+ const char *p;
+
+ root = of_get_flat_dt_root();
+ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+ if (chosen == -FDT_ERR_NOTFOUND)
+ return false;
+
+ p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+ if (!p)
+ return false;
+
+ if (strstr(p, "dt_cpu_ftrs=off"))
+ return true;
+
+ return false;
+}
+
static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
int depth, void *data)
{
@@ -766,8 +798,6 @@
return 0;
}
-static bool __initdata using_dt_cpu_ftrs = false;
-
bool __init dt_cpu_ftrs_in_use(void)
{
return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@
bool __init dt_cpu_ftrs_init(void *fdt)
{
+ using_dt_cpu_ftrs = false;
+
/* Setup and verify the FDT, if it fails we just bail */
if (!early_init_dt_verify(fdt))
return false;
@@ -782,6 +814,9 @@
if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
return false;
+ if (disabled_on_cmdline())
+ return false;
+
cpufeatures_setup_cpu();
using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@
void __init dt_cpu_ftrs_scan(void)
{
+ if (!using_dt_cpu_ftrs)
+ return;
+
of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}
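An aside on the new boot option plumbing: the hunks above wire it up twice, once through a normal early_param() handler and once through disabled_on_cmdline(), which scans /chosen/bootargs in the flattened device tree directly, presumably because dt_cpu_ftrs_init() runs before the early_param handlers are processed. A minimal standalone sketch of that bootargs substring check (plain C, invented helper name, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for disabled_on_cmdline(): look for the token in the
     * raw command line pulled out of the flattened device tree. */
    static bool dt_cpu_ftrs_disabled(const char *bootargs)
    {
            if (!bootargs)
                    return false;
            return strstr(bootargs, "dt_cpu_ftrs=off") != NULL;
    }

    int main(void)
    {
            printf("%d\n", dt_cpu_ftrs_disabled("root=/dev/sda dt_cpu_ftrs=off quiet")); /* 1 */
            printf("%d\n", dt_cpu_ftrs_disabled("root=/dev/sda dt_cpu_ftrs=known"));     /* 0 */
            return 0;
    }

The "known" value only affects enable_unknown and can wait for the regular early_param pass, which is why only "off" needs the early scan.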
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index baae104..2ad725e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1666,6 +1666,7 @@
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
+ current->thread.load_fp = 0;
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@
current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
+ current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@
current->thread.tm_tfhar = 0;
current->thread.tm_texasr = 0;
current->thread.tm_tfiar = 0;
+ current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 71dcda9..857129a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,7 +928,7 @@
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_PPC64
- init_mm.context.addr_limit = TASK_SIZE_128TB;
+ init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
#error "context.addr_limit not initialized."
#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f35ff9d..a8c1f99 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -661,7 +661,7 @@
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+ return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
__pa(MAX_DMA_ADDRESS));
}
@@ -672,7 +672,7 @@
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- if (cpu_to_node(from) == cpu_to_node(to))
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 023a311..4636ca6 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -69,7 +69,7 @@
{
/* If the XIVE supports the new "store EOI" facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- __x_writeq(0, __x_eoi_page(xd));
+ __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
opal_int_eoi(hw_irq);
} else {
@@ -89,7 +89,7 @@
* properly.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
- __x_readq(__x_eoi_page(xd));
+ __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
else {
eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 6575b9a..a12e863 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -68,7 +68,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 9dbd2a73..0ee6be4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -112,7 +112,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -157,7 +157,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
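These hunks (together with the slice.c, hugetlbpage-radix.c and other-architecture hunks elsewhere in this merge) change the hole-fits-here test from vma->vm_start to vm_start_gap(vma), so a hinted mapping that would end inside the stack guard gap is rejected instead of being placed flush against the stack. Roughly, vm_start_gap() lowers the start of a downward-growing stack VMA by the guard gap; a simplified userspace sketch of the idea (simplified types, assumed 4 KiB pages and the default 256-page gap, not the kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_GROWSDOWN 0x1UL
    static unsigned long stack_guard_gap = 256UL << 12;    /* 256 pages of 4 KiB */

    struct vma { unsigned long vm_start, vm_end, vm_flags; };

    /* Effective start of the VMA once the guard gap is accounted for. */
    static unsigned long vm_start_gap(const struct vma *vma)
    {
            unsigned long start = vma->vm_start;

            if (vma->vm_flags & VM_GROWSDOWN) {
                    start -= stack_guard_gap;
                    if (start > vma->vm_start)      /* guard against underflow */
                            start = 0;
            }
            return start;
    }

    static bool hint_fits(unsigned long addr, unsigned long len, const struct vma *next)
    {
            return !next || addr + len <= vm_start_gap(next);
    }

    int main(void)
    {
            struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

            /* Ends at 0x7efffff80000, inside the 1 MiB gap below the stack:
             * the old vma->vm_start check would have accepted it. */
            printf("%d\n", hint_fits(0x7effffe80000UL, 0x100000UL, &stack)); /* 0 */
            return 0;
    }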
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2a..a3edf81 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@
* mm->context.addr_limit. Default to max task size so that we copy the
* default values to paca which will help us to handle slb miss early.
*/
- mm->context.addr_limit = TASK_SIZE_128TB;
+ mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
/*
* The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 966b9fc..45f6740 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -99,7 +99,7 @@
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e9..bb28e1a 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = ISA207_TEST_ADDER,
+ .test_adder = P9_DD1_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = P9_DD1_TEST_ADDER,
+ .test_adder = ISA207_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3..4fd64d3 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@
In case of doubt, say Y
+config PPC_DT_CPU_FTRS
+ bool "Device-tree based CPU feature discovery & setup"
+ depends on PPC_BOOK3S_64
+ default y
+ help
+ This enables code to use a new device tree binding for describing CPU
+ compatibility and features. Saying Y here will attempt to use the new
+ binding if the firmware provides it. Currently only the skiboot
+ firmware provides this binding.
+ If you're not sure, say Y.
+
config UDBG_RTAS_CONSOLE
bool "RTAS based debug console"
depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891a..84b7ac9 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@
skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
+
+ rc = 0;
out:
free_page((unsigned long)buf);
return rc;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 78fa939..e6f444b 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -75,7 +75,8 @@
if (WARN_ON(!gpdev))
return NULL;
- if (WARN_ON(!gpdev->dev.of_node))
+ /* Not all PCI devices have device-tree nodes */
+ if (!gpdev->dev.of_node)
return NULL;
/* Get associated PCI device */
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef1..8c61192 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -407,7 +407,13 @@
static int subcore_init(void)
{
- if (!cpu_has_feature(CPU_FTR_SUBCORE))
+ unsigned pvr_ver;
+
+ pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+ if (pvr_ver != PVR_POWER8 &&
+ pvr_ver != PVR_POWER8E &&
+ pvr_ver != PVR_POWER8NVL)
return 0;
/*
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71..1fb162b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
}
@@ -147,6 +148,7 @@
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
}
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b4..6afddae 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@
static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+ struct u8_gpio_chip *u8_gc =
+ container_of(mm_gc, struct u8_gpio_chip, mm_gc);
u8_gc->data = in_8(mm_gc->regs);
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 9138250..8f5e303 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -297,7 +297,7 @@
{
/* If the XIVE supports the new "store EOI" facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- out_be64(xd->eoi_mmio, 0);
+ out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
/*
* The FW told us to call it. This happens for some
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e161faf..6967add 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -363,9 +363,6 @@
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
config SMP
def_bool y
prompt "Symmetric multi-processing support"
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa..2820722 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -471,6 +482,7 @@
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@
CONFIG_XFS_RT=y
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_SELFTEST=y
CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
CONFIG_CORDIC=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5..3c6b781 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -468,6 +478,7 @@
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542..653d72b 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -466,6 +476,7 @@
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c1..afa46a7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@
CONFIG_NR_CPUS=2
# CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
# CONFIG_CHECK_STACK is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
CONFIG_RAW_DRIVER=y
# CONFIG_SCLP_ASYNC is not set
# CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@
# CONFIG_INOTIFY_USER is not set
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189db..20244a3 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@
CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 426614a..65d07ac 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -541,7 +541,6 @@
struct mutex ais_lock;
u8 simm;
u8 nimm;
- int ais_enabled;
};
struct kvm_hw_wp_info_arch {
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e408d9c..6315037 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,17 @@
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
-.Lrewind_pad:
- nop 0
+.Lrewind_pad6:
+ nopr 7
+.Lrewind_pad4:
+ nopr 7
+.Lrewind_pad2:
+ nopr 7
.globl sie_exit
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -249,7 +254,9 @@
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index caf15c8..2d120fe 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2160,7 +2160,7 @@
struct kvm_s390_ais_req req;
int ret = 0;
- if (!fi->ais_enabled)
+ if (!test_kvm_facility(kvm, 72))
return -ENOTSUPP;
if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@
};
int ret = 0;
- if (!fi->ais_enabled || !adapter->suppressible)
+ if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
return kvm_s390_inject_vm(kvm, &s390int);
mutex_lock(&fi->ais_lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 689ac48..f28e2e7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -558,7 +558,6 @@
} else {
set_kvm_facility(kvm->arch.model.fac_mask, 72);
set_kvm_facility(kvm->arch.model.fac_list, 72);
- kvm->arch.float_int.ais_enabled = 1;
r = 0;
}
mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@
mutex_init(&kvm->arch.float_int.ais_lock);
kvm->arch.float_int.simm = 0;
kvm->arch.float_int.nimm = 0;
- kvm->arch.float_int.ais_enabled = 0;
spin_lock_init(&kvm->arch.float_int.lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++)
INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b017dae..b854b1d 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,7 +101,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
@@ -151,7 +151,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 08e7af0..6a1a129 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -64,7 +64,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -114,7 +114,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0..5639c9f 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@
int "Maximum number of CPUs"
depends on SMP
range 2 32 if SPARC32
- range 2 1024 if SPARC64
+ range 2 4096 if SPARC64
default 32 if SPARC32
- default 64 if SPARC64
+ default 4096 if SPARC64
source kernel/Kconfig.hz
@@ -295,9 +295,13 @@
depends on SPARC64 && SMP
config NODES_SHIFT
- int
- default "4"
+ int "Maximum NUMA Nodes (as a power of 2)"
+ range 4 5 if SPARC64
+ default "5"
depends on NEED_MULTIPLE_NODES
+ help
+ Specify the maximum number of NUMA Nodes available on the target
+ system. Increases memory reserved to accommodate various tables.
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
@@ -573,9 +577,6 @@
depends on COMPAT && SYSVIPC
default y
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
endmenu
source "net/Kconfig"
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0db..83b36a5 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
#define CTX_NR_MASK TAG_CONTEXT_BITS
#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
#define CTX_VALID(__ctx) \
(!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6..2cddcda 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
@@ -76,8 +71,9 @@
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
- int cpu;
+ int cpu = smp_processor_id();
+ per_cpu(per_cpu_secondary_mm, cpu) = mm;
if (unlikely(mm == &init_mm))
return;
@@ -123,7 +119,6 @@
* for the first time, we must flush that context out of the
* local TLB.
*/
- cpu = smp_processor_id();
if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpumask_set_cpu(cpu, mm_cpumask(mm));
__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@
}
#define deactivate_mm(tsk,mm) do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
- unsigned long flags;
- int cpu;
-
- spin_lock_irqsave(&mm->context.lock, flags);
- if (!CTX_VALID(mm->context))
- get_new_mmu_context(mm);
- cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_set_cpu(cpu, mm_cpumask(mm));
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- tsb_context_switch(mm);
- spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 26693703..522b43d 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
#define PIL_SMP_CALL_FUNC 1
#define PIL_SMP_RECEIVE_SIGNAL 2
#define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
#define PIL_DEVICE_IRQ 5
#define PIL_SMP_CALL_FUNC_SNGL 6
#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6c..9dca7a8 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@
int compat_len;
u64 dev_no;
+ u64 id;
unsigned long channel_id;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7..f87265a 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
- strcpy(pbuf.req.svc_id, cp->service_id);
+ strcpy(pbuf.id_buf, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248a..99dd133 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@
{
#ifdef CONFIG_SMP
unsigned long page;
+ void *mondo, *p;
- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+ /* Make sure mondo block is 64-byte aligned */
+ p = kzalloc(127, GFP_KERNEL);
+ if (!p) {
+ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+ prom_halt();
+ }
+ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+ tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
- tb->cpu_mondo_block_pa = __pa(page);
- tb->cpu_list_pa = __pa(page + 64);
+ tb->cpu_list_pa = __pa(page);
#endif
}
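The irq_64.c hunk above stops carving the 64-byte cpu mondo block out of the shared page and instead kzalloc()s 127 bytes and rounds the returned pointer up to the next 64-byte boundary: 64 + 63 bytes are always enough to contain one fully aligned 64-byte block. A standalone sketch of that round-up arithmetic (ordinary C, nothing kernel-specific):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* Worst case the allocation starts just past an alignment boundary,
             * so 64 + 63 = 127 bytes always cover one aligned 64-byte block. */
            void *p = calloc(1, 127);
            if (!p)
                    return 1;

            uintptr_t aligned = ((uintptr_t)p + 63) & ~(uintptr_t)0x3f;

            printf("raw=%p aligned=%#lx\n", p, (unsigned long)aligned);
            printf("fits=%d\n", aligned + 64 <= (uintptr_t)p + 127);        /* always 1 */

            free(p);
            return 0;
    }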
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551..6ae1e77 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@
/* smp_64.c */
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac..fdf3104 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@
preempt_enable();
}
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
- struct mm_struct *mm;
- unsigned long flags;
-
- clear_softint(1 << irq);
-
- /* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
- mm = current->active_mm;
- if (unlikely(!mm || (mm == &init_mm)))
- return;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- if (unlikely(!CTX_VALID(mm->context)))
- get_new_mmu_context(mm);
-
- spin_unlock_irqrestore(&mm->context.lock, flags);
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context),
- SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ef4520e..043544d 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -120,7 +120,7 @@
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -183,7 +183,7 @@
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cf..07c0df9 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
- add %o0, %o1, %g1 /* end of old tsb */
+ add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+ mov %o4, %g1 /* page_size_shift */
+
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
@@ -486,9 +489,9 @@
/* This can definitely be computed faster... */
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -496,7 +499,7 @@
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
- cmp %o0, %g1
+ cmp %o0, %o1
bne,pt %xcc, 90b
nop
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f65..efe93ab 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857..075d389 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@
if (!id) {
dev_set_name(&vdev->dev, "%s", bus_id_name);
vdev->dev_no = ~(u64)0;
+ vdev->id = ~(u64)0;
} else if (!cfg_handle) {
dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
vdev->dev_no = *id;
+ vdev->id = ~(u64)0;
} else {
dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
+ vdev->id = *id;
}
vdev->dev.parent = parent;
@@ -351,27 +354,84 @@
(void) vio_create_one(hp, node, &root_vdev->dev);
}
+struct vio_md_node_query {
+ const char *type;
+ u64 dev_no;
+ u64 id;
+};
+
static int vio_md_node_match(struct device *dev, void *arg)
{
+ struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
struct vio_dev *vdev = to_vio_dev(dev);
- if (vdev->mp == (u64) arg)
- return 1;
+ if (vdev->dev_no != query->dev_no)
+ return 0;
+ if (vdev->id != query->id)
+ return 0;
+ if (strcmp(vdev->type, query->type))
+ return 0;
- return 0;
+ return 1;
}
static void vio_remove(struct mdesc_handle *hp, u64 node)
{
+ const char *type;
+ const u64 *id, *cfg_handle;
+ u64 a;
+ struct vio_md_node_query query;
struct device *dev;
- dev = device_find_child(&root_vdev->dev, (void *) node,
+ type = mdesc_get_property(hp, node, "device-type", NULL);
+ if (!type) {
+ type = mdesc_get_property(hp, node, "name", NULL);
+ if (!type)
+ type = mdesc_node_name(hp, node);
+ }
+
+ query.type = type;
+
+ id = mdesc_get_property(hp, node, "id", NULL);
+ cfg_handle = NULL;
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ if (!id) {
+ query.dev_no = ~(u64)0;
+ query.id = ~(u64)0;
+ } else if (!cfg_handle) {
+ query.dev_no = *id;
+ query.id = ~(u64)0;
+ } else {
+ query.dev_no = *cfg_handle;
+ query.id = *id;
+ }
+
+ dev = device_find_child(&root_vdev->dev, &query,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
+ } else {
+ if (!id)
+ printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+ type);
+ else if (!cfg_handle)
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+ type, *id);
+ else
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+ type, *cfg_handle, *id);
}
}
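The vio_remove() rework above stops matching children by the raw MD node value and instead builds a (type, dev_no, id) query that device_find_child() hands to the match callback through its void * argument. A reduced sketch of that "struct as match key" pattern outside the driver core (invented struct and function names):

    #include <stdio.h>
    #include <string.h>

    struct vio_like_dev { const char *type; unsigned long long dev_no, id; };
    struct node_query   { const char *type; unsigned long long dev_no, id; };

    /* Same shape as a device_find_child() match callback: 0 = no match, 1 = match. */
    static int node_match(const struct vio_like_dev *dev, const void *arg)
    {
            const struct node_query *q = arg;

            if (dev->dev_no != q->dev_no)
                    return 0;
            if (dev->id != q->id)
                    return 0;
            return strcmp(dev->type, q->type) == 0;
    }

    int main(void)
    {
            struct vio_like_dev dev = { "vdc-port", 0, 2 };
            struct node_query q = { "vdc-port", 0, 2 };

            printf("%d\n", node_match(&dev, &q));   /* 1 */
            q.id = 3;
            printf("%d\n", node_match(&dev, &q));   /* 0 */
            return 0;
    }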
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2..07c03e7 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@
lib-$(CONFIG_SPARC64) += atomic_64.o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 0000000..d6b6c97
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+ .text
+ .align 4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+ mov %o1, %g1
+ srl %o3, 0, %g4
+ mulx %g4, %g1, %o1
+ srlx %g1, 0x20, %g3
+ mulx %g3, %g4, %g5
+ sllx %g5, 0x20, %o5
+ srl %g1, 0, %g4
+ sub %o1, %o5, %o5
+ srlx %o5, 0x20, %o5
+ addcc %g5, %o5, %g5
+ srlx %o3, 0x20, %o5
+ mulx %g4, %o5, %g4
+ mulx %g3, %o5, %o5
+ sethi %hi(0x80000000), %g3
+ addcc %g5, %g4, %g5
+ srlx %g5, 0x20, %g5
+ add %g3, %g3, %g3
+ movcc %xcc, %g0, %g3
+ addcc %o5, %g5, %o5
+ sllx %g4, 0x20, %g4
+ add %o1, %g4, %o1
+ add %o5, %g3, %g2
+ mulx %g1, %o2, %g1
+ add %g1, %g2, %g1
+ mulx %o0, %o3, %o0
+ retl
+ add %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
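multi3.S adds __multi3, the compiler runtime helper that gcc emits for 128-bit multiplication on sparc64. Its intended semantics are just the low 128 bits of the product; a reference version in C, assuming a 64-bit compiler with the __int128 extension (for illustration only, not a replacement for the assembly):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference semantics for __multi3: 128-bit product modulo 2^128. */
    static __int128 multi3_reference(__int128 u, __int128 v)
    {
            return u * v;
    }

    int main(void)
    {
            __int128 r = multi3_reference((__int128)0x123456789abcdef0ULL, 16);

            printf("hi=%016llx lo=%016llx\n",
                   (unsigned long long)(r >> 64), (unsigned long long)r);
            return 0;
    }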
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38..88855e3 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653..3c40ebd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@
}
if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
- pr_warn("hugepagesz=%llu not supported by MMU.\n",
+ hugetlb_bad_size();
+ pr_err("hugepagesz=%llu not supported by MMU.\n",
hugepage_size);
goto out;
}
@@ -706,10 +707,58 @@
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+ unsigned long new_ver, new_ctx, old_ctx;
+ struct mm_struct *mm;
+ int cpu;
+
+ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+ /* Reserve kernel context */
+ set_bit(0, mmu_context_bmap);
+
+ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+ if (unlikely(new_ver == 0))
+ new_ver = CTX_FIRST_VERSION;
+ tlb_context_cache = new_ver;
+
+ /*
+ * Make sure that any new mm that is added into per_cpu_secondary_mm
+ * goes through the get_new_mmu_context() path.
+ */
+ mb();
+
+ /*
+ * Update versions to current on those CPUs that had valid secondary
+ * contexts
+ */
+ for_each_online_cpu(cpu) {
+ /*
+ * If a new mm is stored after we took this mm from the array,
+ * it will go through the get_new_mmu_context() path, because we
+ * already bumped the version in tlb_context_cache.
+ */
+ mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+ if (unlikely(!mm || mm == &init_mm))
+ continue;
+
+ old_ctx = mm->context.sparc64_ctx_val;
+ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+ mm->context.sparc64_ctx_val = new_ctx;
+ }
+ }
+}
/* Caller does TLB context flushing on local CPU if necessary.
* The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
- int new_version;
spin_lock(&ctx_alloc_lock);
+retry:
+ /* wrap might have happened, test again if our context became valid */
+ if (unlikely(CTX_VALID(mm->context)))
+ goto out;
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
- new_version = 0;
if (new_ctx >= (1 << CTX_NR_BITS)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
if (new_ctx >= ctx) {
- int i;
- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
- CTX_FIRST_VERSION;
- if (new_ctx == 1)
- new_ctx = CTX_FIRST_VERSION;
-
- /* Don't call memset, for 16 entries that's just
- * plain silly...
- */
- mmu_context_bmap[0] = 3;
- mmu_context_bmap[1] = 0;
- mmu_context_bmap[2] = 0;
- mmu_context_bmap[3] = 0;
- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
- mmu_context_bmap[i + 0] = 0;
- mmu_context_bmap[i + 1] = 0;
- mmu_context_bmap[i + 2] = 0;
- mmu_context_bmap[i + 3] = 0;
- }
- new_version = 1;
- goto out;
+ mmu_context_wrap();
+ goto retry;
}
}
+ if (mm->context.sparc64_ctx_val)
+ cpumask_clear(mm_cpumask(mm));
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
spin_unlock(&ctx_alloc_lock);
-
- if (unlikely(new_version))
- smp_new_mmu_context_version();
}
static int numa_enabled = 1;
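The init_64.c rework above replaces the open-coded bitmap reset with mmu_context_wrap(), which bumps the version field in tlb_context_cache and re-stamps only the contexts currently loaded as a secondary context on some CPU; every other mm fails CTX_VALID() on its next switch and reallocates lazily under ctx_alloc_lock. A toy sketch of that version check (field widths invented for the example, not the real CTX_* layout):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy layout: low 13 bits = context number, remaining bits = version. */
    #define CTX_NR_BITS      13
    #define CTX_NR_MASK      ((1UL << CTX_NR_BITS) - 1)
    #define CTX_VERSION_MASK (~CTX_NR_MASK)

    static unsigned long tlb_context_cache = 1UL << CTX_NR_BITS;   /* version 1 */

    static bool ctx_valid(unsigned long ctx_val)
    {
            return ((ctx_val ^ tlb_context_cache) & CTX_VERSION_MASK) == 0;
    }

    int main(void)
    {
            unsigned long mm_ctx = (tlb_context_cache & CTX_VERSION_MASK) | 42;

            printf("%d\n", ctx_valid(mm_ctx));              /* 1: same version */
            tlb_context_cache += 1UL << CTX_NR_BITS;        /* wrap: bump version */
            printf("%d\n", ctx_valid(mm_ctx));              /* 0: stale, must reallocate */
            return 0;
    }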
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b..0d4b998 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@
extern void copy_tsb(unsigned long old_tsb_base,
unsigned long old_tsb_size,
unsigned long new_tsb_base,
- unsigned long new_tsb_size);
+ unsigned long new_tsb_size,
+ unsigned long page_size_shift);
unsigned long old_tsb_base = (unsigned long) old_tsb;
unsigned long new_tsb_base = (unsigned long) new_tsb;
@@ -504,7 +505,9 @@
old_tsb_base = __pa(old_tsb_base);
new_tsb_base = __pa(new_tsb_base);
}
- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+ tsb_index == MM_TSB_BASE ?
+ PAGE_SHIFT : REAL_HPAGE_SHIFT);
}
mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6c..fcf4d27 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
retry
- .globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
- retry
-
#ifdef CONFIG_KGDB
.globl xcall_kgdb_capture
xcall_kgdb_capture:
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index cb10153..03e5cc4 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -233,7 +233,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4ccfacc..0efb4c9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2776,10 +2776,6 @@
config SYSVIPC_COMPAT
def_bool y
depends on SYSVIPC
-
-config KEYS_COMPAT
- def_bool y
- depends on KEYS
endif
endmenu
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index b8ad261..c66d19e 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,6 +29,7 @@
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index a70fd61..6f07744 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -255,6 +255,7 @@
break;
case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+ case 11: /* GX1 with inverted Device ID */
#ifdef CONFIG_PCI
{
u32 vendor, device;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index afdfd23..f522415 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -619,6 +619,9 @@
show_saved_mc();
+ /* initrd is going away, clear patch ptr. */
+ intel_ucode_patch = NULL;
+
return 0;
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index da5c097..43e10d6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@
*/
rcu_irq_exit();
native_safe_halt();
- rcu_irq_enter();
local_irq_disable();
+ rcu_irq_enter();
}
}
if (!n.halted)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 207b8f2..213ddf3 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -144,7 +144,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -187,7 +187,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3995d3a..bf54309 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -182,7 +182,7 @@
return ud == INSN_UD0 || ud == INSN_UD2;
}
-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
{
if (trapnr != X86_TRAP_UD)
return 0;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a181ae7..59ca2ee 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -780,18 +780,20 @@
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
- int j, nent = vcpu->arch.cpuid_nent;
+ struct kvm_cpuid_entry2 *ej;
+ int j = i;
+ int nent = vcpu->arch.cpuid_nent;
e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
/* when no next entry is found, the current entry[i] is reselected */
- for (j = i + 1; ; j = (j + 1) % nent) {
- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
- if (ej->function == e->function) {
- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- return j;
- }
- }
- return 0; /* silence gcc, even though control never reaches here */
+ do {
+ j = (j + 1) % nent;
+ ej = &vcpu->arch.cpuid_entries[j];
+ } while (ej->function != e->function);
+
+ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+ return j;
}
/* find an entry with matching function, matching index (if needed), and that
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d3376f..cb82259 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3698,12 +3698,15 @@
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
if (unlikely(!lapic_in_kernel(vcpu) ||
kvm_event_needs_reinjection(vcpu)))
return false;
+ if (is_guest_mode(vcpu))
+ return false;
+
return kvm_x86_ops->interrupt_allowed(vcpu);
}
@@ -3719,7 +3722,7 @@
if (!async)
return false; /* *pfn has correct page already */
- if (!prefault && can_do_async_pf(vcpu)) {
+ if (!prefault && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(gva, gfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 2797580..330bf3a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,6 +76,7 @@
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b4b5d6..ca5d2b9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2425,7 +2425,7 @@
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2cd099..87d3cb9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8607,8 +8607,7 @@
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
return true;
else
- return !kvm_event_needs_reinjection(vcpu) &&
- kvm_x86_ops->interrupt_allowed(vcpu);
+ return kvm_can_do_async_pf(vcpu);
}
void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 35ea061..0ea8afc 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -162,6 +162,9 @@
if (fixup_exception(regs, trapnr))
return;
+ if (fixup_bug(regs, trapnr))
+ return;
+
fail:
early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
(unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 302f43f..adad702 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -148,7 +148,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cbc87ea..9b3f9fa 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,16 +161,16 @@
static void __init probe_page_size_mask(void)
{
-#if !defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
* use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc.
*/
- if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+ if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+ else
+ direct_gbpages = 0;
/* Enable PSE if available */
if (boot_cpu_has(X86_FEATURE_PSE))
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88e..19707db 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a265edd..9934102 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@
{
int irq = irq_find_mapping(NULL, hwirq);
- if (hwirq >= NR_IRQS) {
- printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, hwirq);
- }
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 394ef08..33bfa52 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -593,8 +593,7 @@
(ccount_freq/10000) % 100,
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
-
- seq_printf(f,"flags\t\t: "
+ seq_puts(f, "flags\t\t: "
#if XCHAL_HAVE_NMI
"nmi "
#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 0693792..74afbf0 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -88,7 +88,7 @@
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 30d9fc2..162c77e 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
- SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48)
+ SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif
@@ -306,13 +306,13 @@
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- DOUBLEEXC_VECTOR_VADDR - 48,
+ DOUBLEEXC_VECTOR_VADDR - 20,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
- 48,
+ 20,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 02e94bb..c45b90b 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -317,8 +317,7 @@
if (simdisk_count > MAX_SIMDISK_COUNT)
simdisk_count = MAX_SIMDISK_COUNT;
- sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
- GFP_KERNEL);
+ sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
if (sddev == NULL)
goto out_unregister;
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b..1fda7e2 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
/* Interrupt configuration. */
-#define PLATFORM_NR_IRQS 10
+#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
+#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
+#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif
/*
@@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10
-#define C67X00_IRQ 5
+
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be72..42285f3 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
- .start = OETH_IRQ,
- .end = OETH_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -213,8 +213,8 @@
.flags = IORESOURCE_MEM,
},
[1] = { /* IRQ number */
- .start = C67X00_IRQ,
- .end = C67X00_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -247,7 +247,7 @@
static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
- .irq = DUART16552_INTNUM,
+ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
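
The xtensa hunks above shift device IRQ resources from raw hardware interrupt numbers to XTENSA_PIC_LINUX_IRQ(hwirq), which the irq.h hunk defines as hwirq + 1 (with NR_IRQS grown by one to make room). Presumably the offset keeps Linux IRQ 0 free as the "no interrupt" value. A tiny standalone illustration of the mapping, assuming for the example that XCHAL_EXTINT0_NUM is 0 and XCHAL_EXTINT1_NUM is 1 (the real values are core-specific):

    #include <stdio.h>

    #define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)

    int main(void)
    {
            int duart_hwirq = 0;    /* assumed XCHAL_EXTINT0_NUM for the demo */
            int oeth_hwirq = 1;     /* assumed XCHAL_EXTINT1_NUM for the demo */

            /* Hardware interrupt 0 becomes Linux IRQ 1; Linux IRQ 0 stays unused. */
            printf("duart: hwirq %d -> linux irq %d\n",
                   duart_hwirq, XTENSA_PIC_LINUX_IRQ(duart_hwirq));
            printf("oeth:  hwirq %d -> linux irq %d\n",
                   oeth_hwirq, XTENSA_PIC_LINUX_IRQ(oeth_hwirq));
            return 0;
    }
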
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c8a32fb..78b2e0d 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -52,7 +52,7 @@
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
unsigned long long now;
@@ -67,7 +67,7 @@
bfqg_stats_clear_waiting(stats);
}
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
struct bfq_group *curr_bfqg)
{
@@ -81,7 +81,7 @@
bfqg_stats_mark_waiting(stats);
}
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
unsigned long long now;
@@ -203,12 +203,30 @@
static void bfqg_get(struct bfq_group *bfqg)
{
- return blkg_get(bfqg_to_blkg(bfqg));
+ bfqg->ref++;
}
void bfqg_put(struct bfq_group *bfqg)
{
- return blkg_put(bfqg_to_blkg(bfqg));
+ bfqg->ref--;
+
+ if (bfqg->ref == 0)
+ kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+
+ blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+ bfqg_put(bfqg);
+
+ blkg_put(bfqg_to_blkg(bfqg));
}
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@
if (bfqq) {
bfqq->ioprio = bfqq->new_ioprio;
bfqq->ioprio_class = bfqq->new_ioprio_class;
- bfqg_get(bfqg);
+ /*
+ * Make sure that bfqg and its associated blkg do not
+ * disappear before entity.
+ */
+ bfqg_and_blkg_get(bfqg);
}
entity->parent = bfqg->my_entity; /* NULL for root group */
entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@
return NULL;
}
+ /* see comments in bfq_bic_update_cgroup for why refcounting */
+ bfqg_get(bfqg);
return &bfqg->pd;
}
@@ -426,7 +450,7 @@
struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg_stats_exit(&bfqg->stats);
- return kfree(bfqg);
+ bfqg_put(bfqg);
}
void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@
* Move @bfqq to @bfqg, deactivating it from its old group and reactivating
* it on the new one. Avoid putting the entity on the old group idle tree.
*
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
*/
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_group *bfqg)
@@ -519,16 +544,12 @@
bfq_deactivate_bfqq(bfqd, bfqq, false, false);
else if (entity->on_st)
bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
- bfqg_put(bfqq_group(bfqq));
+ bfqg_and_blkg_put(bfqq_group(bfqq));
- /*
- * Here we use a reference to bfqg. We don't need a refcounter
- * as the cgroup reference will not be dropped, so that its
- * destroy() callback will not be invoked.
- */
entity->parent = bfqg->my_entity;
entity->sched_data = &bfqg->sched_data;
- bfqg_get(bfqg);
+ /* pin down bfqg and its associated blkg */
+ bfqg_and_blkg_get(bfqg);
if (bfq_bfqq_busy(bfqq)) {
bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@
* @bic: the bic to move.
* @blkcg: the blk-cgroup to move to.
*
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue)
*
* NOTE: an alternative approach might have been to store the current
* cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@
goto out;
bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+ /*
+ * Update blkg_path for bfq_log_* functions. We cache this
+ * path, and update it here, for the following
+ * reasons. Operations on blkg objects in blk-cgroup are
+ * protected with the request_queue lock, and not with the
+ * lock that protects the instances of this scheduler
+ * (bfqd->lock). This exposes BFQ to the following sort of
+ * race.
+ *
+ * The blkg_lookup performed in bfq_get_queue, protected
+ * through rcu, may happen to return the address of a copy of
+ * the original blkg. If this is the case, then the
+ * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+ * the blkg, is useless: it does not prevent blk-cgroup code
+ * from destroying both the original blkg and all objects
+ * directly or indirectly referred by the copy of the
+ * blkg.
+ *
+ * On the bright side, destroy operations on a blkg invoke, as
+ * a first step, hooks of the scheduler associated with the
+ * blkg. And these hooks are executed with bfqd->lock held for
+ * BFQ. As a consequence, for any blkg associated with the
+ * request queue this instance of the scheduler is attached
+ * to, we are guaranteed that such a blkg is not destroyed, and
+ * that all the pointers it contains are consistent, while we
+ * are holding bfqd->lock. A blkg_lookup performed with
+ * bfqd->lock held then returns a fully consistent blkg, which
+ * remains consistent as long as this lock is held.
+ *
+ * Thanks to the last fact, and to the fact that: (1) bfqg has
+ * been obtained through a blkg_lookup in the above
+ * assignment, and (2) bfqd->lock is being held, here we can
+ * safely use the policy data for the involved blkg (i.e., the
+ * field bfqg->pd) to get to the blkg associated with bfqg,
+ * and then we can safely use any field of blkg. After we
+ * release bfqd->lock, even just getting blkg through this
+ * bfqg may cause dangling references to be traversed, as
+ * bfqg->pd may not exist any more.
+ *
+ * In view of the above facts, here we cache, in the bfqg, any
+ * blkg data we may need for this bic, and for its associated
+ * bfq_queue. As of now, we need to cache only the path of the
+ * blkg, which is used in the bfq_log_* functions.
+ *
+ * Finally, note that bfqg itself needs to be protected from
+ * destruction on the blkg_free of the original blkg (which
+ * invokes bfq_pd_free). We use an additional private
+ * refcounter for bfqg, to let it disappear only after no
+ * bfq_queue refers to it any longer.
+ */
+ blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
bic->blkcg_serial_nr = serial_nr;
out:
rcu_read_unlock();
@@ -640,8 +713,6 @@
* @bfqd: the device data structure with the root group.
* @bfqg: the group to move from.
* @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
*/
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
struct bfq_group *bfqg,
@@ -692,8 +763,7 @@
/*
* The idle tree may still contain bfq_queues belonging
* to exited task because they never migrated to a different
- * cgroup from the one being destroyed now. No one else
- * can access them so it's safe to act without any lock.
+ * cgroup from the one being destroyed now.
*/
bfq_flush_idle_tree(st);
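
The comment added in bfq_bic_update_cgroup above motivates giving bfqg a private refcount alongside the blkg reference, so that the bfqg (and the blkg path cached in it) can outlive the blkg itself. Below is a minimal userspace model of that dual-reference idea, with all names and the put ordering simplified for illustration; the real code naturally uses the blk-cgroup blkg_get/blkg_put machinery under bfqd->lock:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct blkg { int ref; };

    struct bfqg {
            int ref;                 /* private refcount, as added by the hunk */
            char blkg_path[128];     /* cached so it survives the blkg */
            struct blkg *blkg;
    };

    static void bfqg_get(struct bfqg *g) { g->ref++; }

    static void bfqg_put(struct bfqg *g)
    {
            if (--g->ref == 0) {
                    printf("freeing bfqg (cached path was %s)\n", g->blkg_path);
                    free(g);
            }
    }

    static void blkg_get(struct blkg *b) { b->ref++; }

    static void blkg_put(struct blkg *b)
    {
            if (--b->ref == 0) {
                    printf("freeing blkg\n");
                    free(b);
            }
    }

    /* Pin both objects, as bfqg_and_blkg_get() does in the hunk. */
    static void bfqg_and_blkg_get(struct bfqg *g)
    {
            bfqg_get(g);
            blkg_get(g->blkg);
    }

    static void bfqg_and_blkg_put(struct bfqg *g)
    {
            struct blkg *b = g->blkg;   /* grab before g can go away */

            bfqg_put(g);
            blkg_put(b);
    }

    int main(void)
    {
            struct blkg *b = calloc(1, sizeof(*b));
            struct bfqg *g = calloc(1, sizeof(*g));

            g->blkg = b;
            bfqg_get(g);                  /* reference taken at bfqg allocation */
            blkg_get(b);                  /* reference held by blk-cgroup itself */
            strcpy(g->blkg_path, "demo/cgroup/path");

            bfqg_and_blkg_get(g);         /* a bfq_queue pins both objects */
            blkg_put(b);                  /* blk-cgroup drops the blkg... */
            bfqg_and_blkg_put(g);         /* ...the blkg dies here, bfqg survives */
            bfqg_put(g);                  /* final put, bfqg freed only now */
            return 0;
    }
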
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 08ce450..ed93da2 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3665,7 +3665,7 @@
kmem_cache_free(bfq_pool, bfqq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
- bfqg_put(bfqg);
+ bfqg_and_blkg_put(bfqg);
#endif
}
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index ae783c0..5c3bf98 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -759,6 +759,12 @@
/* must be the first member */
struct blkg_policy_data pd;
+ /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+ char blkg_path[128];
+
+ /* reference counter (see comments in bfq_bic_update_cgroup) */
+ int ref;
+
struct bfq_entity entity;
struct bfq_sched_data sched_data;
@@ -838,7 +844,7 @@
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
- char __pbuf[128]; \
- \
- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- __pbuf, ##args); \
+ bfqq_group(bfqq)->blkg_path, ##args); \
} while (0)
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
- char __pbuf[128]; \
- \
- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
#else /* CONFIG_BFQ_GROUP_IOSCHED */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5384713..b5009a8 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@
if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
return false;
+ if (!bio_sectors(bio))
+ return false;
+
/* Already protected? */
if (bio_integrity(bio))
return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1bccced..bb66c96 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1461,22 +1461,28 @@
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
- bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie, bool may_sleep)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
.rq = rq,
.last = true,
};
- struct blk_mq_hw_ctx *hctx;
blk_qc_t new_cookie;
int ret;
+ bool run_queue = true;
+
+ if (blk_mq_hctx_stopped(hctx)) {
+ run_queue = false;
+ goto insert;
+ }
if (q->elevator)
goto insert;
- if (!blk_mq_get_driver_tag(rq, &hctx, false))
+ if (!blk_mq_get_driver_tag(rq, NULL, false))
goto insert;
new_cookie = request_to_qc_t(hctx, rq);
@@ -1500,7 +1506,7 @@
__blk_mq_requeue_request(rq);
insert:
- blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+ blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
}
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1514,7 @@
{
if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
rcu_read_lock();
- __blk_mq_try_issue_directly(rq, cookie, false);
+ __blk_mq_try_issue_directly(hctx, rq, cookie, false);
rcu_read_unlock();
} else {
unsigned int srcu_idx;
@@ -1516,7 +1522,7 @@
might_sleep();
srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
- __blk_mq_try_issue_directly(rq, cookie, true);
+ __blk_mq_try_issue_directly(hctx, rq, cookie, true);
srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
}
}
@@ -1619,9 +1625,12 @@
blk_mq_put_ctx(data.ctx);
- if (same_queue_rq)
+ if (same_queue_rq) {
+ data.hctx = blk_mq_map_queue(q,
+ same_queue_rq->mq_ctx->cpu);
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
+ }
} else if (q->nr_hw_queues > 1 && is_sync) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 283da7f..27aceab 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -777,24 +777,25 @@
}
/**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
*
* Description:
- * blk_release_queue is the pair to blk_init_queue() or
- * blk_queue_make_request(). It should be called when a request queue is
- * being released; typically when a block device is being de-registered.
- * Currently, its primary task it to free all the &struct request
- * structures that were allocated to the queue and the queue itself.
+ * blk_release_queue is the counterpart of blk_init_queue(). It should be
+ * called when a request queue is being released; typically when a block
+ * device is being de-registered. Its primary task is to free the queue
+ * itself.
*
- * Note:
+ * Notes:
* The low level driver must have finished any outstanding requests first
* via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ * Although blk_release_queue() may be called with preemption disabled,
+ * __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
{
- struct request_queue *q =
- container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q = container_of(work, typeof(*q), release_work);
if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
blk_stat_remove_callback(q, q->poll_cb);
@@ -834,6 +835,15 @@
call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
+static void blk_release_queue(struct kobject *kobj)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+
+ INIT_WORK(&q->release_work, __blk_release_queue);
+ schedule_work(&q->release_work);
+}
+
static const struct sysfs_ops queue_sysfs_ops = {
.show = queue_attr_show,
.store = queue_attr_store,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fc13dd0..a7285bf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -27,6 +27,13 @@
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latencies come from sequential IO. Such IO tells us
+ * little about whether the IO is being impacted by others, hence we ignore it.
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
@@ -212,6 +219,7 @@
struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
struct latency_bucket __percpu *latency_buckets;
unsigned long last_calculate_time;
+ unsigned long filtered_latency;
bool track_bio_latency;
};
@@ -698,7 +706,7 @@
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
unsigned long expires)
{
- unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+ unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
/*
* Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@
throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
bio_op(bio), lat);
- if (tg->latency_target) {
+ if (tg->latency_target && lat >= tg->td->filtered_latency) {
int bucket;
unsigned int threshold;
@@ -2417,14 +2425,20 @@
void blk_throtl_register_queue(struct request_queue *q)
{
struct throtl_data *td;
+ int i;
td = q->td;
BUG_ON(!td);
- if (blk_queue_nonrot(q))
+ if (blk_queue_nonrot(q)) {
td->throtl_slice = DFL_THROTL_SLICE_SSD;
- else
+ td->filtered_latency = LATENCY_FILTERED_SSD;
+ } else {
td->throtl_slice = DFL_THROTL_SLICE_HD;
+ td->filtered_latency = LATENCY_FILTERED_HD;
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+ td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+ }
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
/* if no low limit, use previous default */
td->throtl_slice = DFL_THROTL_SLICE_HD;
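
The throttle hunks above do two related things: they pre-load the per-bucket average latency with a 4 ms baseline on rotational disks, and they discard latency samples below a per-device filter (1 ms on HD, 0 on SSD) before comparing them against the configured latency target. A small standalone sketch of that sample-filtering decision, with the queue structure and the sample numbers made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define DFL_HD_BASELINE_LATENCY 4000L   /* 4ms, in usec */
    #define LATENCY_FILTERED_SSD       0L
    #define LATENCY_FILTERED_HD     1000L   /* 1ms */

    struct td {
            long filtered_latency;
            long avg_bucket_latency;        /* a single bucket, for brevity */
    };

    static void td_register(struct td *td, bool nonrot)
    {
            if (nonrot) {
                    td->filtered_latency = LATENCY_FILTERED_SSD;
            } else {
                    td->filtered_latency = LATENCY_FILTERED_HD;
                    /* seed the average so early samples have a sane reference */
                    td->avg_bucket_latency = DFL_HD_BASELINE_LATENCY;
            }
    }

    /* Only samples at or above the filter count toward the latency target. */
    static bool account_sample(const struct td *td, long lat, long target)
    {
            return target && lat >= td->filtered_latency;
    }

    int main(void)
    {
            struct td hd = { 0 };

            td_register(&hd, false);
            printf("600us sample counted:  %d\n", account_sample(&hd, 600, 2000));
            printf("1500us sample counted: %d\n", account_sample(&hd, 1500, 2000));
            return 0;
    }
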
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d3a989e..3cd6e12 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -141,7 +141,7 @@
* signature and returns that to us.
*/
ret = crypto_akcipher_verify(req);
- if (ret == -EINPROGRESS) {
+ if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
wait_for_completion(&compl.completion);
ret = compl.err;
}
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 672a94c..d178650 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -381,7 +381,7 @@
}
error:
- kfree(desc);
+ kzfree(desc);
error_no_desc:
crypto_free_shash(tfm);
kleave(" = %d", ret);
@@ -450,6 +450,6 @@
ret = pefile_digest_pe(pebuf, pelen, &ctx);
error:
- kfree(ctx.digest);
+ kzfree(ctx.digest);
return ret;
}
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index c80765b2..dd03fea 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -102,6 +102,7 @@
}
}
+ ret = -ENOMEM;
cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
if (!cert->pub->key)
goto error_decode;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fa749f4..cdb27ac 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1767,9 +1767,8 @@
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &drbg->ctr_completion);
- if (!ret && !drbg->ctr_async_err) {
+ wait_for_completion(&drbg->ctr_completion);
+ if (!drbg->ctr_async_err) {
reinit_completion(&drbg->ctr_completion);
break;
}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b7ad808..3841b5e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@
err = crypto_skcipher_encrypt(&data->req);
if (err == -EINPROGRESS || err == -EBUSY) {
- err = wait_for_completion_interruptible(
- &data->result.completion);
- if (!err)
- err = data->result.err;
+ wait_for_completion(&data->result.completion);
+ err = data->result.err;
}
if (err)
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 7abe665..0d2e989 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -416,9 +416,18 @@
}
}
- table_desc->validation_count++;
- if (table_desc->validation_count == 0) {
- table_desc->validation_count--;
+ if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+ table_desc->validation_count++;
+
+ /*
+ * Detect validation_count overflows to ensure that the warning
+ * message will only be printed once.
+ */
+ if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+ ACPI_WARNING((AE_INFO,
+ "Table %p, Validation count overflows\n",
+ table_desc));
+ }
}
*out_table = table_desc->pointer;
@@ -445,13 +454,20 @@
ACPI_FUNCTION_TRACE(acpi_tb_put_table);
- if (table_desc->validation_count == 0) {
- ACPI_WARNING((AE_INFO,
- "Table %p, Validation count is zero before decrement\n",
- table_desc));
- return_VOID;
+ if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+ table_desc->validation_count--;
+
+ /*
+ * Detect validation_count underflows to ensure that the warning
+ * message will only be printed once.
+ */
+ if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+ ACPI_WARNING((AE_INFO,
+ "Table %p, Validation count underflows\n",
+ table_desc));
+ return_VOID;
+ }
}
- table_desc->validation_count--;
if (table_desc->validation_count == 0) {
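
The ACPICA hunks above turn validation_count into a saturating counter: it only moves while below ACPI_MAX_TABLE_VALIDATIONS, and the overflow/underflow warnings fire once when the boundary is first crossed. A compact userspace model of that saturate-and-warn-once behaviour, using an artificially small limit so the output is easy to follow:

    #include <stdio.h>

    #define ACPI_MAX_TABLE_VALIDATIONS 4   /* tiny limit, for demonstration only */

    static unsigned int validation_count;

    static void get_table(void)
    {
            if (validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
                    validation_count++;
                    /* warn exactly once, when the counter saturates */
                    if (validation_count >= ACPI_MAX_TABLE_VALIDATIONS)
                            printf("warning: validation count overflows\n");
            }
    }

    static void put_table(void)
    {
            if (validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
                    validation_count--;
                    /* an unbalanced put wraps the unsigned counter; warn once */
                    if (validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
                            printf("warning: validation count underflows\n");
                            return;
                    }
            }
            if (validation_count == 0)
                    printf("last reference dropped, table may be invalidated\n");
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 6; i++)
                    get_table();     /* saturates at the limit; warns exactly once */

            validation_count = 1;    /* reset for the underflow demonstration */
            put_table();             /* drops to zero: table may be invalidated */
            put_table();             /* unbalanced put wraps the counter; warns once */
            return 0;
    }
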
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e0587c8..ff096d9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -474,15 +474,6 @@
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
- /*
- * The end_tag opcode must be followed by a zero byte.
- * Although this byte is technically defined to be a checksum,
- * in practice, all ASL compilers set this byte to zero.
- */
- if (*(aml + 1) != 0) {
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
- }
-
/* Return the pointer to the end_tag if requested */
if (!user_function) {
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c5fecf9..797b28d 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -666,14 +666,6 @@
int ret = -ENODEV;
struct fwnode_handle *iort_fwnode;
- /*
- * If we already translated the fwspec there
- * is nothing left to do, return the iommu_ops.
- */
- ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
- if (ops)
- return ops;
-
if (node) {
iort_fwnode = iort_get_fwnode(node);
if (!iort_fwnode)
@@ -735,6 +727,14 @@
u32 streamid = 0;
int err;
+ /*
+ * If we already translated the fwspec there
+ * is nothing left to do, return the iommu_ops.
+ */
+ ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+ if (ops)
+ return ops;
+
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
u32 rid;
@@ -782,6 +782,12 @@
if (err)
ops = ERR_PTR(err);
+ /* Ignore all other errors apart from EPROBE_DEFER */
+ if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+ dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+ ops = NULL;
+ }
+
return ops;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a9a9ab3..d42eeef 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@
if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
(test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
(battery->capacity_now <= battery->alarm)))
- pm_wakeup_hard_event(&battery->device->dev);
+ pm_wakeup_event(&battery->device->dev, 0);
return result;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 9ad8cdb..e19f530 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -217,7 +217,7 @@
}
if (state)
- pm_wakeup_hard_event(&device->dev);
+ pm_wakeup_event(&device->dev, 0);
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@
} else {
int keycode;
- pm_wakeup_hard_event(&device->dev);
+ pm_wakeup_event(&device->dev, 0);
if (button->suspended)
break;
@@ -534,7 +534,6 @@
lid_device = device;
}
- device_init_wakeup(&device->dev, true);
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 798d500..993fd31 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -24,7 +24,6 @@
#include <linux/pm_qos.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
-#include <linux/suspend.h>
#include "internal.h"
@@ -400,7 +399,7 @@
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present) {
- pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+ __pm_wakeup_event(adev->wakeup.ws, 0);
if (adev->wakeup.context.work.func)
queue_pm_work(&adev->wakeup.context.work);
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e39ec7b..3a10d757 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1371,8 +1371,8 @@
iort_set_dma_mask(dev);
iommu = iort_iommu_configure(dev);
- if (IS_ERR(iommu))
- return PTR_ERR(iommu);
+ if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
/*
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a6574d6..097d630 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -663,40 +663,14 @@
acpi_os_wait_events_complete();
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
-
return 0;
}
-static void acpi_freeze_wake(void)
-{
- /*
- * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
- * that the SCI has triggered while suspended, so cancel the wakeup in
- * case it has not been a wakeup event (the GPEs will be checked later).
- */
- if (acpi_sci_irq_valid() &&
- !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
- pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
- /*
- * Process all pending events in case there are any wakeup ones.
- *
- * The EC driver uses the system workqueue, so that one needs to be
- * flushed too.
- */
- acpi_os_wait_events_complete();
- flush_scheduled_work();
-}
-
static void acpi_freeze_restore(void)
{
acpi_disable_wakeup_devices(ACPI_STATE_S0);
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
-
acpi_enable_all_runtime_gpes();
}
@@ -708,8 +682,6 @@
static const struct platform_freeze_ops acpi_freeze_ops = {
.begin = acpi_freeze_begin,
.prepare = acpi_freeze_prepare,
- .wake = acpi_freeze_wake,
- .sync = acpi_freeze_sync,
.restore = acpi_freeze_restore,
.end = acpi_freeze_end,
};
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2fc5240..c699540 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1364,6 +1364,40 @@
{}
#endif
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+ struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ {
+ .ident = "Acer Switch Alpha 12",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+ },
+ },
+ { }
+ };
+
+ if (dmi_check_system(sysids)) {
+ dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+ if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+ hpriv->port_map = 0x7;
+ hpriv->cap = 0xC734FF02;
+ }
+ }
+}
+
#ifdef CONFIG_ARM64
/*
* Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@
"online status unreliable, applying workaround\n");
}
+
+ /* Acer SA5-271 workaround modifies private_data */
+ acer_sa5_271_workaround(hpriv, pdev);
+
/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b..cd2eab6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "no irq\n");
+ return irq;
}
hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2d83b8c..e157a0e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6800,7 +6800,7 @@
}
force_ent->port = simple_strtoul(id, &endp, 10);
- if (p == endp || *endp != '\0') {
+ if (id == endp || *endp != '\0') {
*reason = "invalid port/link";
return -EINVAL;
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b66bcda..3b2246d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4067,7 +4067,6 @@
struct ata_host *host;
struct mv_host_priv *hpriv;
struct resource *res;
- void __iomem *mmio;
int n_ports = 0, irq = 0;
int rc;
int port;
@@ -4086,9 +4085,8 @@
* Get the register base first
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mmio))
- return PTR_ERR(mmio);
+ if (res == NULL)
+ return -EINVAL;
/* allocate host */
if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@
hpriv->board_idx = chip_soc;
host->iomap = NULL;
- hpriv->base = mmio - SATAHC0_REG_BASE;
+ hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hpriv->base)
+ return -ENOMEM;
+
+ hpriv->base -= SATAHC0_REG_BASE;
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 5d38245..b7939a2 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -890,7 +890,10 @@
dev_err(&pdev->dev, "failed to get access to sata clock\n");
return PTR_ERR(priv->clk);
}
- clk_prepare_enable(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -970,8 +973,11 @@
struct ata_host *host = dev_get_drvdata(dev);
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
+ int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@
{
struct ata_host *host = dev_get_drvdata(dev);
struct sata_rcar_priv *priv = host->private_data;
+ int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
sata_rcar_setup_port(host);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e987a6f..9faee1c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1091,6 +1091,11 @@
if (async_error)
goto Complete;
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 9c36b27..c313b60 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,8 +28,8 @@
/* First wakeup IRQ seen by the kernel in the last cycle. */
unsigned int pm_wakeup_irq __read_mostly;
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@
pm_print_active_wakeup_sources();
}
- return ret || atomic_read(&pm_abort_suspend) > 0;
+ return ret || pm_abort_suspend;
}
void pm_system_wakeup(void)
{
- atomic_inc(&pm_abort_suspend);
+ pm_abort_suspend = true;
freeze_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
-void pm_system_cancel_wakeup(void)
+void pm_wakeup_clear(void)
{
- atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
-{
+ pm_abort_suspend = false;
pm_wakeup_irq = 0;
- if (reset)
- atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d9329..ebbd0c3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -608,6 +608,9 @@
*/
static int loop_flush(struct loop_device *lo)
{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (lo->lo_state != Lo_bound)
+ return 0;
return loop_switch(lo, NULL);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6e0cbe0..593a881 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
/* It's illegal to wrap around the end of the physical address space. */
- if (offset + (phys_addr_t)size < offset)
+ if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
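
The /dev/mem hunk above changes the wrap-around test from offset + size < offset to offset + size - 1 < offset. The difference only matters for a mapping that ends exactly at the top of the physical address space: there offset + size wraps to zero and the old test rejects a valid request, while the inclusive last-byte test does not. A short standalone demonstration, with a 32-bit "physical address" type chosen only to keep the numbers readable:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t phys_addr_t;   /* 32-bit for the example; the kernel type varies */

    static int old_check(phys_addr_t offset, phys_addr_t size)
    {
            return offset + size < offset;      /* trips even for a valid tail mapping */
    }

    static int new_check(phys_addr_t offset, phys_addr_t size)
    {
            /* only true on a real wrap past the top (size > 0 assumed) */
            return offset + size - 1 < offset;
    }

    int main(void)
    {
            phys_addr_t offset = 0xFFFFF000u;   /* last page of a 4 GiB space */
            phys_addr_t size   = 0x1000u;       /* one page, ends exactly at the top */

            printf("old check rejects: %d\n", old_check(offset, size)); /* 1: wrongly rejected */
            printf("new check rejects: %d\n", new_check(offset, size)); /* 0: allowed */

            printf("real wrap, old: %d new: %d\n",
                   old_check(0xFFFFF000u, 0x2000u),
                   new_check(0xFFFFF000u, 0x2000u));                    /* both 1 */
            return 0;
    }
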
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a561f0c..e870f32 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
/*
* random.c -- A strong random number generator
*
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
*
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@
static struct crng_state **crng_node_pool __read_mostly;
#endif
+static void invalidate_batched_entropy(void);
+
static void crng_initialize(struct crng_state *crng)
{
int i;
@@ -799,6 +804,7 @@
cp++; crng_init_cnt++; len--;
}
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ invalidate_batched_entropy();
crng_init = 1;
wake_up_interruptible(&crng_init_wait);
pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
if (crng == &primary_crng && crng_init < 2) {
+ invalidate_batched_entropy();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
- unsigned long flags;
+ unsigned int idx;
if (regs == NULL)
return 0;
- local_irq_save(flags);
- if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
- f->reg_idx = 0;
- ptr += f->reg_idx++;
- local_irq_restore(flags);
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
return *ptr;
}
@@ -2023,6 +2030,7 @@
};
unsigned int position;
};
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
/*
* Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@
u64 get_random_u64(void)
{
u64 ret;
+ bool use_lock = crng_init < 2;
+ unsigned long flags;
struct batched_entropy *batch;
#if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@
#endif
batch = &get_cpu_var(batched_entropy_u64);
+ if (use_lock)
+ read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
+ if (use_lock)
+ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
put_cpu_var(batched_entropy_u64);
return ret;
}
@@ -2059,22 +2073,45 @@
u32 get_random_u32(void)
{
u32 ret;
+ bool use_lock = crng_init < 2;
+ unsigned long flags;
struct batched_entropy *batch;
if (arch_get_random_int(&ret))
return ret;
batch = &get_cpu_var(batched_entropy_u32);
+ if (use_lock)
+ read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
+ if (use_lock)
+ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
put_cpu_var(batched_entropy_u32);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+ int cpu;
+ unsigned long flags;
+
+ write_lock_irqsave(&batched_entropy_reset_lock, flags);
+ for_each_possible_cpu (cpu) {
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+ }
+ write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.
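
The random.c hunk above guards the per-CPU entropy batches with a reset lock while the CRNG is still initializing, so that invalidate_batched_entropy() can zero every batch position and force a re-extraction on the next request. Below is a small single-threaded model of the position-reset idea, leaving out the locking and per-CPU machinery and using a counter as a stand-in for extract_crng(), purely to show why resetting position to zero is enough to discard stale words:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BATCH_WORDS 4

    struct batched_entropy {
            uint64_t entropy[BATCH_WORDS];
            unsigned int position;
    };

    static uint64_t extract_seed = 1;   /* stand-in for extract_crng() output */

    static void extract(struct batched_entropy *b)
    {
            for (unsigned int i = 0; i < BATCH_WORDS; i++)
                    b->entropy[i] = extract_seed++;
    }

    static uint64_t get_random_u64_demo(struct batched_entropy *b)
    {
            /* refill whenever the batch has been consumed (or invalidated) */
            if (b->position % BATCH_WORDS == 0) {
                    extract(b);
                    b->position = 0;
            }
            return b->entropy[b->position++];
    }

    /* Model of invalidate_batched_entropy(): forget any words handed out
     * before the generator was properly seeded. */
    static void invalidate(struct batched_entropy *b)
    {
            b->position = 0;
    }

    int main(void)
    {
            struct batched_entropy b = { .position = 0 };

            printf("%" PRIu64 "\n", get_random_u64_demo(&b));  /* 1, from batch #1 */
            printf("%" PRIu64 "\n", get_random_u64_demo(&b));  /* 2 */

            invalidate(&b);                                    /* crng got seeded */

            printf("%" PRIu64 "\n", get_random_u64_demo(&b));  /* 5, a fresh batch */
            return 0;
    }
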
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a89d41c..aae87c4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1209,9 +1209,9 @@
return 0;
}
- rate = readl_relaxed(frame + CNTFRQ);
+ rate = readl_relaxed(base + CNTFRQ);
- iounmap(frame);
+ iounmap(base);
return rate;
}
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index a144dfc..29d5175 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index a4ebc8f..2a3fe83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 992f7c2..88220ff 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -185,8 +185,8 @@
int ret;
ret = sscanf(buf, "%u", &input);
- /* cannot be lower than 11 otherwise freq will not fall */
- if (ret != 1 || input < 11 || input > 100 ||
+ /* cannot be lower than 1 otherwise freq will not fall */
+ if (ret != 1 || input < 1 || input > 100 ||
input >= dbs_data->up_threshold)
return -EINVAL;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7de5bd..eb11585 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -571,9 +571,10 @@
static int min_perf_pct_min(void)
{
struct cpudata *cpu = all_cpu_data[0];
+ int turbo_pstate = cpu->pstate.turbo_pstate;
- return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
- cpu->pstate.turbo_pstate);
+ return turbo_pstate ?
+ DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
}
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index ffca4fc..ae8eb03 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -180,8 +180,10 @@
if (!state_node)
break;
- if (!of_device_is_available(state_node))
+ if (!of_device_is_available(state_node)) {
+ of_node_put(state_node);
continue;
+ }
if (!idle_state_valid(state_node, i, cpumask)) {
pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 6ed32aa..922d082 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -210,9 +210,12 @@
static struct inode *dax_alloc_inode(struct super_block *sb)
{
struct dax_device *dax_dev;
+ struct inode *inode;
dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
- return &dax_dev->inode;
+ inode = &dax_dev->inode;
+ inode->i_rdev = 0;
+ return inode;
}
static struct dax_device *to_dax_dev(struct inode *inode)
@@ -227,7 +230,8 @@
kfree(dax_dev->host);
dax_dev->host = NULL;
- ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
+ if (inode->i_rdev)
+ ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
kmem_cache_free(dax_cache, dax_dev);
}
@@ -423,6 +427,7 @@
struct dax_device *dax_dev = _dax_dev;
struct inode *inode = &dax_dev->inode;
+ memset(dax_dev, 0, sizeof(*dax_dev));
inode_init_once(inode);
}
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 5c3e7b1..f6e7956 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -267,7 +267,11 @@
}
platform_set_drvdata(pdev, nocp);
- clk_prepare_enable(nocp->clk);
+ ret = clk_prepare_enable(nocp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+ return ret;
+ }
pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
dev_name(dev));
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 9b73509..d96e3dc 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -44,7 +44,7 @@
{ "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
-struct __exynos_ppmu_events {
+static struct __exynos_ppmu_events {
char *name;
int id;
} ppmu_events[] = {
@@ -648,7 +648,11 @@
dev_name(&pdev->dev), desc[i].name);
}
- clk_prepare_enable(info->ppmu.clk);
+ ret = clk_prepare_enable(info->ppmu.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+ return ret;
+ }
return 0;
}
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index dc269cb..951b6c7 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,7 +47,7 @@
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
-DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
@@ -192,7 +192,7 @@
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
- ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
+ ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 93f7acd..7830419 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -144,7 +144,7 @@
buf = dmi_early_remap(dmi_base, orig_dmi_len);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
dmi_decode_table(buf, decode, NULL);
@@ -178,7 +178,7 @@
const char *d = (const char *) dm;
const char *p;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || dm->length <= string)
return;
p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
- const u8 *d = (u8 *) dm + index;
+ const u8 *d;
char *s;
int is_ff = 1, is_00 = 1, i;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || dm->length <= index + 16)
return;
+ d = (u8 *) dm + index;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
if (d[i] != 0x00)
is_00 = 0;
@@ -228,16 +229,17 @@
static void __init dmi_save_type(const struct dmi_header *dm, int slot,
int index)
{
- const u8 *d = (u8 *) dm + index;
+ const u8 *d;
char *s;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || dm->length <= index)
return;
s = dmi_alloc(4);
if (!s)
return;
+ d = (u8 *) dm + index;
sprintf(s, "%u", *d & 0x7F);
dmi_ident[slot] = s;
}
@@ -278,9 +280,13 @@
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
{
- int i, count = *(u8 *)(dm + 1);
+ int i, count;
struct dmi_device *dev;
+ if (dm->length < 0x05)
+ return;
+
+ count = *(u8 *)(dm + 1);
for (i = 1; i <= count; i++) {
const char *devname = dmi_string(dm, i);
@@ -353,6 +359,9 @@
const char *name;
const u8 *d = (u8 *)dm;
+ if (dm->length < 0x0B)
+ return;
+
/* Skip disabled device */
if ((d[0x5] & 0x80) == 0)
return;
@@ -387,7 +396,7 @@
const char *d = (const char *)dm;
static int nr;
- if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -650,6 +659,21 @@
goto error;
/*
+ * Same logic as above, look for a 64-bit entry point
+ * first, and if not found, fall back to 32-bit entry point.
+ */
+ memcpy_fromio(buf, p, 16);
+ for (q = p + 16; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf + 16, q, 16);
+ if (!dmi_smbios3_present(buf)) {
+ dmi_available = 1;
+ dmi_early_unmap(p, 0x10000);
+ goto out;
+ }
+ memcpy(buf, buf + 16, 16);
+ }
+
+ /*
* Iterate over all possible DMI header addresses q.
* Maintain the 32 bytes around q in buf. On the
* first iteration, substitute zero for the
@@ -659,7 +683,7 @@
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
- if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
+ if (!dmi_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
goto out;
@@ -993,7 +1017,8 @@
* @decode: Callback function
* @private_data: Private data to be passed to the callback function
*
- * Returns -1 when the DMI table can't be reached, 0 on success.
+ * Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ * or a different negative error code if DMI walking fails.
*/
int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data)
@@ -1001,11 +1026,11 @@
u8 *buf;
if (!dmi_available)
- return -1;
+ return -ENXIO;
buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
dmi_decode_table(buf, decode, private_data);
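
The dmi_scan hunks above add dm->length checks before every field access, because a malformed SMBIOS record can be shorter than the offset being read. A minimal illustration of that pattern on a fake record, with the record layout invented for the example (real DMI records are parsed from the firmware-provided table):

    #include <stdint.h>
    #include <stdio.h>

    struct dmi_header {
            uint8_t type;
            uint8_t length;     /* length of the formatted area, header included */
            uint16_t handle;
            /* type-specific bytes follow */
    };

    /* Read the byte at 'index' only if the record is long enough to hold it. */
    static int dmi_read_byte(const struct dmi_header *dm, unsigned int index,
                             uint8_t *out)
    {
            const uint8_t *d = (const uint8_t *)dm;

            if (dm->length <= index)
                    return -1;          /* truncated or malformed record */
            *out = d[index];
            return 0;
    }

    int main(void)
    {
            /* A fake 6-byte record: 4-byte header plus two payload bytes. */
            union {
                    struct dmi_header hdr;
                    uint8_t raw[6];
            } rec = { .raw = { 0x01, 0x06, 0x00, 0x00, 0xAA, 0xBB } };
            uint8_t val;

            if (dmi_read_byte(&rec.hdr, 0x05, &val) == 0)
                    printf("offset 0x05: 0x%02X\n", val);      /* 0xBB */

            if (dmi_read_byte(&rec.hdr, 0x12, &val) != 0)
                    printf("offset 0x12: rejected, record too short\n");
            return 0;
    }
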
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index 8bf2732..b58233e 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,6 +27,26 @@
u32 size;
} __packed;
+static bool efi_bgrt_addr_valid(u64 addr)
+{
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ u64 size;
+ u64 end;
+
+ if (md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+ if (addr >= md->phys_addr && addr < end)
+ return true;
+ }
+
+ return false;
+}
+
void __init efi_bgrt_init(struct acpi_table_header *table)
{
void *image;
@@ -36,7 +56,7 @@
if (acpi_disabled)
return;
- if (!efi_enabled(EFI_BOOT))
+ if (!efi_enabled(EFI_MEMMAP))
return;
if (table->length < sizeof(bgrt_tab)) {
@@ -65,6 +85,10 @@
goto out;
}
+ if (!efi_bgrt_addr_valid(bgrt->image_address)) {
+ pr_notice("Ignoring BGRT: invalid image address\n");
+ goto out;
+ }
image = early_memremap(bgrt->image_address, sizeof(bmp_header));
if (!image) {
pr_notice("Ignoring BGRT: failed to map image header memory\n");
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1e7860f..31058d40 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -136,12 +136,12 @@
info->value = value;
INIT_LIST_HEAD(&info->list);
- list_add_tail(&info->list, &sec->attribs);
ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
if (ret)
goto free_info_key;
+ list_add_tail(&info->list, &sec->attribs);
return 0;
free_info_key:
@@ -158,8 +158,8 @@
struct vpd_attrib_info *temp;
list_for_each_entry_safe(info, temp, &sec->attribs, list) {
- kfree(info->key);
sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+ kfree(info->key);
kfree(info);
}
}
@@ -244,7 +244,7 @@
{
if (sec->enabled) {
vpd_section_attrib_destroy(sec);
- kobject_del(sec->kobj);
+ kobject_put(sec->kobj);
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
iounmap(sec->baseaddr);
@@ -331,7 +331,7 @@
{
vpd_section_destroy(&ro_vpd);
vpd_section_destroy(&rw_vpd);
- kobject_del(vpd_kobj);
+ kobject_put(vpd_kobj);
}
module_init(vpd_platform_init);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index ccea609..4ca436e 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -646,6 +646,9 @@
int rc;
int i;
+ if (!gpio->clk)
+ return -EINVAL;
+
rc = usecs_to_cycles(gpio, usecs, &requested_cycles);
if (rc < 0) {
dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 2197368..e60156e 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -90,8 +90,18 @@
{
int reg;
- if (gpio == 94)
- return GPIOPANELCTL;
+ if (gpio >= CRYSTALCOVE_GPIO_NUM) {
+ /*
+ * Virtual GPIO called from ACPI, for now we only support
+ * the panel ctl.
+ */
+ switch (gpio) {
+ case 0x5e:
+ return GPIOPANELCTL;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
if (reg_type == CTRL_IN) {
if (gpio < 8)
@@ -130,36 +140,36 @@
static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return 0;
- return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_INPUT_SET);
+ return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
}
static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return 0;
- return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_OUTPUT_SET | value);
+ return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
}
static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
- int ret;
unsigned int val;
+ int ret, reg = to_reg(gpio, CTRL_IN);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return 0;
- ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val);
+ ret = regmap_read(cg->regmap, reg, &val);
if (ret)
return ret;
@@ -170,14 +180,15 @@
unsigned gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return;
if (value)
- regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+ regmap_update_bits(cg->regmap, reg, 1, 1);
else
- regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+ regmap_update_bits(cg->regmap, reg, 1, 0);
}
static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@
struct crystalcove_gpio *cg =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+ return 0;
+
switch (type) {
case IRQ_TYPE_NONE:
cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@
struct crystalcove_gpio *cg =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- cg->set_irq_mask = false;
- cg->update |= UPDATE_IRQ_MASK;
+ if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+ cg->set_irq_mask = false;
+ cg->update |= UPDATE_IRQ_MASK;
+ }
}
static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@
struct crystalcove_gpio *cg =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- cg->set_irq_mask = true;
- cg->update |= UPDATE_IRQ_MASK;
+ if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+ cg->set_irq_mask = true;
+ cg->update |= UPDATE_IRQ_MASK;
+ }
}
static struct irq_chip crystalcove_irqchip = {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 19a92ef..5104b63 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -747,7 +747,7 @@
set = U32_MAX;
else
return -EINVAL;
- writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip));
+ writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
if (!mvpwm)
@@ -768,6 +768,13 @@
mvpwm->chip.dev = dev;
mvpwm->chip.ops = &mvebu_pwm_ops;
mvpwm->chip.npwm = mvchip->chip.ngpio;
+ /*
+ * There may already be some PWM allocated, so we can't force
+ * mvpwm->chip.base to a fixed point like mvchip->chip.base.
+ * So, we let pwmchip_add() do the numbering and take the next free
+ * region.
+ */
+ mvpwm->chip.base = -1;
spin_lock_init(&mvpwm->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a..5dffa27 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1207,8 +1207,11 @@
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a..47bbc87 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1176,8 +1176,11 @@
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f35529..d8c9a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -983,8 +983,11 @@
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c1..db30c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1091,8 +1091,11 @@
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827..53e78d0 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,6 +1,7 @@
config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
+ select REGMAP_MMIO
config DRM_DW_HDMI_AHB_AUDIO
tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8be9719..aa885a6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -508,6 +508,8 @@
bool has_connectors =
!!new_crtc_state->connector_mask;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
/*
* This only sets crtc->connectors_changed for routing changes,
* drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
+ WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
funcs = plane->helper_private;
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@
drm_modeset_acquire_init(&ctx, 0);
while (1) {
+ err = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (err)
+ goto out;
+
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
if (err != -EDEADLK)
break;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b5c6bb4..37b8ad3 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -358,7 +358,12 @@
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
- drm_dev_unregister(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_modeset_unregister_all(dev);
+
+ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5abc69c..f77dcfa 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -760,7 +760,7 @@
* Get the endpoint node. In our case, dsi has one output port1
* to which the external HDMI bridge is connected.
*/
- ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c994fe6..4842867 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1235,6 +1235,15 @@
goto out_fini;
pci_set_drvdata(pdev, &dev_priv->drm);
+ /*
+ * Disable the system suspend direct complete optimization, which can
+ * leave the device suspended skipping the driver's suspend handlers
+ * if the device was already runtime suspended. This is needed due to
+ * the difference in our runtime and system suspend sequence and
+ * because the HDA driver may require us to enable the audio power

+ * domain during system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 963f6d4..2c453a4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2991,6 +2991,16 @@
return false;
}
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6ac3df..462031c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3298,6 +3298,10 @@
{
int ret;
+ /* If the device is asleep, we have no requests outstanding */
+ if (!READ_ONCE(i915->gt.awake))
+ return 0;
+
if (flags & I915_WAIT_LOCKED) {
struct i915_gem_timeline *tl;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 50b8f11..f1989b8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2191,6 +2191,101 @@
gen8_set_pte(&gtt_base[i], scratch_pte);
}
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = vm->i915;
+
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct sg_table *st;
+ u64 start;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct sg_table *st,
+ u64 start,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_entries arg = { vm, st, start, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+ struct i915_address_space *vm;
+ u64 start;
+ u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+ struct clear_range *arg = _arg;
+
+ gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+ u64 start,
+ u64 length)
+{
+ struct clear_range arg = { vm, start, length };
+
+ stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -2785,6 +2880,14 @@
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+ if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+ ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->base.clear_range != nop_clear_range)
+ ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ }
+
ggtt->invalidate = gen6_ggtt_invalidate;
return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
- i915->ggtt.invalidate = gen6_ggtt_invalidate;
+ if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+ i915->ggtt.invalidate = gen6_ggtt_invalidate;
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
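The i915_ggtt_disable_guc() hunk only restores gen6_ggtt_invalidate when the GuC hook is actually the one installed, so it cannot clobber the BXT/VT-d callbacks set up just above. A tiny sketch of that "only undo what you installed" pattern with plain function pointers; all names below are invented for the demo:

#include <stdio.h>

static void default_invalidate(void) { puts("default invalidate"); }
static void guc_invalidate(void)     { puts("guc invalidate"); }
static void wa_invalidate(void)      { puts("workaround invalidate"); }

static void (*invalidate)(void) = default_invalidate;

static void disable_guc(void)
{
	/* Only restore the default if the GuC hook is currently installed;
	 * otherwise leave e.g. a workaround hook in place. */
	if (invalidate == guc_invalidate)
		invalidate = default_invalidate;
}

int main(void)
{
	invalidate = wa_invalidate;	/* some other path installed its own hook */
	disable_guc();			/* must not clobber it */
	invalidate();			/* still prints "workaround invalidate" */
	return 0;
}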
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a0d6d43..fb5231f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -278,7 +278,7 @@
obj->mm.quirked = false;
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(!obj->mm.quirked);
+ GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4..1a78363 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -208,7 +208,7 @@
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
- .is_mobile = 1,
+ .is_mobile = 1, .has_fbc = 1,
};
#define GEN6_FEATURES \
@@ -390,7 +390,6 @@
.has_hw_contexts = 1, \
.has_logical_ring_contexts = 1, \
.has_guc = 1, \
- .has_decoupled_mmio = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb297..2cfe96d3 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
/*
* notifications from guest to vgpu device model
*/
@@ -55,8 +51,8 @@
struct vgt_if {
u64 magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
+ u16 version_major;
+ u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */
/*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a97..2e73901 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- uint64_t magic;
- uint32_t version;
+ u64 magic;
+ u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@@ -69,10 +69,8 @@
if (magic != VGT_MAGIC)
return;
- version = INTEL_VGT_IF_VERSION_ENCODE(
- __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
- __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
- if (version != INTEL_VGT_IF_VERSION) {
+ version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cabe52..96b0b01 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4598,7 +4598,7 @@
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
- unsigned scaler_user, int *scaler_id, unsigned int rotation,
+ unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h)
{
struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4607,12 @@
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = drm_rotation_90_or_270(rotation) ?
- (src_h != dst_w || src_w != dst_h):
- (src_w != dst_w || src_h != dst_h);
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ need_scaling = src_w != dst_w || src_h != dst_h;
/*
* if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4674,7 @@
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -4700,7 +4703,6 @@
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
- plane_state->base.rotation,
drm_rect_width(&plane_state->base.src) >> 16,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
@@ -12203,6 +12205,15 @@
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
*/
if (IS_GEN2(dev_priv)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0..f94eacf 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1075,6 +1075,22 @@
return 0;
}
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ bool idle = true;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* No bit for gen2, so assume the CS parser is idle */
+ if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ idle = false;
+
+ intel_runtime_pm_put(dev_priv);
+
+ return idle;
+}
+
/**
* intel_engine_is_idle() - Report if the engine has finished process all work
* @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
/* Any inflight/incomplete requests? */
if (!i915_seqno_passed(intel_engine_get_seqno(engine),
intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@
return false;
/* Ring stopped? */
- if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ if (!ring_is_idle(engine))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add..d93c584 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -82,20 +82,10 @@
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
int *width, int *height)
{
- int w, h;
-
- if (drm_rotation_90_or_270(cache->plane.rotation)) {
- w = cache->plane.src_h;
- h = cache->plane.src_w;
- } else {
- w = cache->plane.src_w;
- h = cache->plane.src_h;
- }
-
if (width)
- *width = w;
+ *width = cache->plane.src_w;
if (height)
- *height = h;
+ *height = cache->plane.src_h;
}
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@
cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
cache->plane.rotation = plane_state->base.rotation;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd60..078fd1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3373,20 +3373,26 @@
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
+ /*
+ * Cursors only support 0/180 degree rotation,
+ * hence no need to account for rotation here.
+ */
src_w = pstate->base.src_w;
src_h = pstate->base.src_h;
dst_w = pstate->base.crtc_w;
dst_h = pstate->base.crtc_h;
} else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
src_w = drm_rect_width(&pstate->base.src);
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
}
- if (drm_rotation_90_or_270(pstate->base.rotation))
- swap(dst_w, dst_h);
-
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3417,12 +3423,14 @@
if (y && format != DRM_FORMAT_NV12)
return 0;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(width, height);
-
/* for planar format */
if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
@@ -3505,12 +3513,14 @@
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(src_w, src_h);
-
/* Halve UV plane width and height for NV12 */
if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2;
@@ -3794,13 +3804,15 @@
width = intel_pstate->base.crtc_w;
height = intel_pstate->base.crtc_h;
} else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
}
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(width, height);
-
cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
@@ -4335,11 +4347,19 @@
struct drm_crtc_state *cstate;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_wm_values *results = &intel_state->wm_results;
+ struct drm_device *dev = state->dev;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
/*
+ * When we distrust bios wm we always need to recompute to set the
+ * expected DDB allocations for each CRTC.
+ */
+ if (to_i915(dev)->wm.distrust_bios_wm)
+ changed = true;
+
+ /*
* If this transaction isn't actually touching any CRTC's, don't
* bother with watermark calculation. Note that if we pass this
* test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4349,6 +4369,7 @@
*/
for_each_new_crtc_in_state(state, crtc, cstate, i)
changed = true;
+
if (!changed)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3780d0..559f1ab 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -435,8 +435,9 @@
}
/* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (intel_crtc->config->pipe_src_w > 3200 ||
- intel_crtc->config->pipe_src_h > 2000) {
+ if (dev_priv->psr.psr2_support &&
+ (intel_crtc->config->pipe_src_w > 3200 ||
+ intel_crtc->config->pipe_src_h > 2000)) {
dev_priv->psr.psr2_support = false;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8c87c71..e6517ed 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,10 +83,13 @@
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@
drm_crtc_vblank_put(&crtc->base);
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73a..f841152 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -59,8 +59,6 @@
* available in the work queue (note, the queue is shared,
* not per-engine). It is OK for this to be nonzero, but
* it should not be huge!
- * q_fail: failed to enqueue a work item. This should never happen,
- * because we check for space beforehand.
* b_fail: failed to ring the doorbell. This should never happen, unless
* somehow the hardware misbehaves, or maybe if the GuC firmware
* crashes? We probably need to reset the GPU to recover.
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8fb801f..8b05ecb 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -673,7 +673,7 @@
ret = drm_of_find_panel_or_bridge(child,
imx_ldb->lvds_mux ? 4 : 2, 0,
&channel->panel, &channel->bridge);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
/* panel ddc only if there is no bridge */
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 808b995..b5cc6e1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,6 +19,7 @@
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -900,16 +901,12 @@
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
- u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
+ int ret;
+ u32 val;
- while (timeout_ms--) {
- if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
- break;
-
- usleep_range(2, 4);
- }
-
- if (timeout_ms == 0) {
+ ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+ 4, 2000000);
+ if (ret) {
DRM_WARN("polling dsi wait not busy timeout!\n");
mtk_dsi_enable(dsi);
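The mtk_dsi hunk swaps the hand-rolled busy loop for readl_poll_timeout(..., 4, 2000000): poll roughly every 4 microseconds, give up after 2 seconds, return 0 or -ETIMEDOUT. A rough userspace analogue of that contract, with an ordinary variable standing in for the DSI_INTSTA register and an arbitrary busy bit:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Poll *reg until (*reg & mask) == 0, sleeping sleep_us between reads and
 * giving up after timeout_us. Mirrors the shape of readl_poll_timeout(). */
static int poll_bit_clear(const volatile uint32_t *reg, uint32_t mask,
			  unsigned int sleep_us, uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	for (;;) {
		if (!(*reg & mask))
			return 0;
		if (now_us() > deadline)
			return -ETIMEDOUT;
		usleep(sleep_us);
	}
}

int main(void)
{
	volatile uint32_t fake_intsta = 0;	/* pretend BUSY is already clear */

	if (poll_bit_clear(&fake_intsta, 1u << 0, 4, 2000000))
		fprintf(stderr, "timed out waiting for idle\n");
	else
		puts("idle");
	return 0;
}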
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 41a1c03..0a4ffd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1062,7 +1062,7 @@
}
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err) {
+ if (err < 0) {
dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
err);
return err;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5..10b227d 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -152,7 +152,7 @@
.max_register = 0x1000,
};
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
struct meson_drm *priv;
@@ -233,10 +233,12 @@
if (ret)
goto free_drm;
- ret = component_bind_all(drm->dev, drm);
- if (ret) {
- dev_err(drm->dev, "Couldn't bind all components\n");
- goto free_drm;
+ if (has_components) {
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't bind all components\n");
+ goto free_drm;
+ }
}
ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@
return ret;
}
+static int meson_drv_bind(struct device *dev)
+{
+ return meson_drv_bind_master(dev, true);
+}
+
static void meson_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@
count += meson_probe_remote(pdev, &match, np, remote);
}
+ if (count && !match)
+ return meson_drv_bind_master(&pdev->dev, false);
+
/* If some endpoints were found, initialize the nodes */
if (count) {
dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a..f4b5358 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1173,7 +1173,10 @@
if (IS_G200_SE(mdev)) {
- if (mdev->unique_rev_id >= 0x02) {
+ if (mdev->unique_rev_id >= 0x04) {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, 0);
+ } else if (mdev->unique_rev_id >= 0x02) {
u8 hi_pri_lvl;
u32 bpp;
u32 mb;
@@ -1639,6 +1642,10 @@
if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (30100 * 1024))
return MODE_BANDWIDTH;
+ } else {
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (55000 * 1024))
+ return MODE_BANDWIDTH;
}
} else if (mdev->type == G200_WB) {
if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c..0abe776 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+#define MODULE_CLKGATE BIT(30)
+#define MODULE_SFTRST BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT 1000000
+
static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
{
return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@
clk_disable_unprepare(mxsfb->clk_disp_axi);
}
+/*
+ * Clear the bit and poll until it reads back as cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ u32 reg;
+
+ writel(mask, addr + MXS_CLR_ADDR);
+ return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@
*/
mxsfb_enable_axi_clk(mxsfb);
+ /* Mandatory eLCDIF reset as per the Reference Manual */
+ err = mxsfb_reset_block(mxsfb->base);
+ if (err)
+ return;
+
/* Clear the FIFOs */
writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
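The new mxsfb helpers lean on the i.MX set/clear register aliases declared above (MXS_SET_ADDR at +0x4, MXS_CLR_ADDR at +0x8): writing a mask to the alias sets or clears just those bits. A toy simulation of that convention and of the clear order used by mxsfb_reset_block(), with a plain variable instead of MMIO:

#include <stdint.h>
#include <stdio.h>

#define SET_OFFSET  0x4
#define CLR_OFFSET  0x8
#define MOD_CLKGATE (1u << 30)
#define MOD_SFTRST  (1u << 31)

/* Emulate one control register with i.MX-style set/clear aliases. */
static uint32_t ctrl;

static void mmio_write(unsigned int offset, uint32_t val)
{
	if (offset == SET_OFFSET)
		ctrl |= val;
	else if (offset == CLR_OFFSET)
		ctrl &= ~val;
	else
		ctrl = val;
}

int main(void)
{
	ctrl = MOD_SFTRST | MOD_CLKGATE;	/* block in reset, clock gated */

	/* Same order as mxsfb_reset_block() above; the real helper also
	 * polls each bit until the hardware reports it cleared. */
	mmio_write(CLR_OFFSET, MOD_SFTRST);
	mmio_write(CLR_OFFSET, MOD_CLKGATE);
	mmio_write(CLR_OFFSET, MOD_SFTRST);
	mmio_write(CLR_OFFSET, MOD_CLKGATE);

	printf("ctrl = 0x%08x\n", ctrl);	/* both bits now clear */
	return 0;
}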
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 6a567fe..820a480 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
struct nvkm_alarm {
struct list_head head;
+ struct list_head exec;
u64 timestamp;
void (*func)(struct nvkm_alarm *);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 36268e1..15a13d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -80,7 +80,7 @@
module_param_named(modeset, nouveau_modeset, int, 0400);
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@
nouveau_fbcon_init(dev);
nouveau_led_init(dev);
- if (nouveau_runtime_pm != 0) {
+ if (nouveau_pmops_runtime()) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nouveau_runtime_pm != 0) {
+ if (nouveau_pmops_runtime()) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
@@ -726,6 +726,14 @@
return nouveau_do_resume(drm_dev, false);
}
+bool
+nouveau_pmops_runtime()
+{
+ if (nouveau_runtime_pm == -1)
+ return nouveau_is_optimus() || nouveau_is_v1_dsm();
+ return nouveau_runtime_pm == 1;
+}
+
static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
@@ -733,14 +741,7 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
@@ -765,8 +766,10 @@
struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
+ if (!nouveau_pmops_runtime()) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -796,14 +799,7 @@
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
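nouveau_pmops_runtime() folds the repeated "runpm == 1, or runpm == -1 and Optimus/DSM detected" tests into one predicate shared by the suspend paths and the VGA switcheroo registration. A stripped-down sketch of that tri-state module-option pattern; the platform probe below is a stand-in, not nouveau's detection code:

#include <stdbool.h>
#include <stdio.h>

/* -1 = auto-detect, 0 = force off, 1 = force on (mirrors the runpm option). */
static int runtime_pm = -1;

/* Stand-in for nouveau_is_optimus() || nouveau_is_v1_dsm(). */
static bool platform_supports_runtime_pm(void)
{
	return true;	/* assumed for the demo */
}

static bool pmops_runtime(void)
{
	if (runtime_pm == -1)
		return platform_supports_runtime_pm();
	return runtime_pm == 1;
}

int main(void)
{
	/* Every call site now asks the same question instead of re-deriving it. */
	printf("runtime suspend allowed: %s\n", pmops_runtime() ? "yes" : "no");

	runtime_pm = 0;
	printf("after forcing off:       %s\n", pmops_runtime() ? "yes" : "no");
	return 0;
}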
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f..a11b6aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -108,8 +108,6 @@
#include <nvif/object.h>
#include <nvif/device.h>
-extern int nouveau_runtime_pm;
-
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
@@ -195,6 +193,7 @@
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
#include <nvkm/core/tegra.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index a4aacbc..02fe0ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,7 +87,7 @@
nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- bool runtime = false;
+ bool runtime = nouveau_pmops_runtime();
/* only relevant for PCI devices */
if (!dev->pdev)
@@ -99,10 +99,6 @@
if (pci_is_thunderbolt_attached(dev->pdev))
return;
- if (nouveau_runtime_pm == 1)
- runtime = true;
- if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
- runtime = true;
vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- bool runtime = false;
+ bool runtime = nouveau_pmops_runtime();
vga_client_register(dev->pdev, NULL, NULL, NULL);
if (pci_is_thunderbolt_attached(dev->pdev))
return;
- if (nouveau_runtime_pm == 1)
- runtime = true;
- if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
- runtime = true;
-
vga_switcheroo_unregister_client(dev->pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a766324..06e564a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2107,7 +2107,8 @@
asyc->set.dither = true;
}
} else {
- asyc->set.mask = ~0;
+ if (asyc)
+ asyc->set.mask = ~0;
asyh->set.mask = ~0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index f2a86ea..2437f7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@
/* Move to completed list. We'll drop the lock before
* executing the callback so it can reschedule itself.
*/
- list_move_tail(&alarm->head, &exec);
+ list_del_init(&alarm->head);
+ list_add(&alarm->exec, &exec);
}
/* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@
spin_unlock_irqrestore(&tmr->lock, flags);
/* Execute completed callbacks. */
- list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del_init(&alarm->head);
+ list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+ list_del(&alarm->exec);
alarm->func(alarm);
}
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 008c145..ca44233 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9267,8 +9267,11 @@
u32 tmp, wm_mask;
if (radeon_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0bf1035..5346372 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2266,8 +2266,11 @@
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
dram_channels = evergreen_get_number_of_dram_channels(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 7431eb4..d34d1cf 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -621,7 +621,7 @@
}
/* TODO: is this still necessary on NI+ ? */
- if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+ if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 76d1888..5303f25 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,8 +2284,11 @@
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d8fa7a9..ce5f2d1 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -245,8 +245,6 @@
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
- struct rockchip_dp_device *dp = to_dp(encoder);
- int ret;
/*
* The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_eDP;
- if (dp->data->chip_type == RK3399_EDP) {
- /*
- * For RK3399, VOP Lit must code the out mode to RGB888,
- * VOP Big must code the out mode to RGB10.
- */
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
- encoder);
- if (ret > 0)
- s->output_mode = ROCKCHIP_OUT_MODE_P888;
- }
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a2169dd..14fa1f8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -615,7 +615,6 @@
{
struct cdn_dp_device *dp = encoder_to_dp(encoder);
int ret, val;
- struct rockchip_crtc_state *state;
ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
if (ret < 0) {
@@ -625,14 +624,10 @@
DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
(ret) ? "LIT" : "BIG");
- state = to_rockchip_crtc_state(encoder->crtc->state);
- if (ret) {
+ if (ret)
val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
- state->output_mode = ROCKCHIP_OUT_MODE_P888;
- } else {
+ else
val = DP_SEL_VOP_LIT << 16;
- state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
- }
ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 3f7a82d..45589d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -875,6 +875,7 @@
static void vop_crtc_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
+ const struct vop_data *vop_data = vop->data;
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@
DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
s->output_type);
}
+
+ /*
+ * If the VOP does not support RGB10 output, force RGB10 down to RGB888.
+ */
+ if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+ !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
VOP_CTRL_SET(vop, out_mode, s->output_mode);
VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 5a4faa85..9979fd0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -142,6 +142,9 @@
const struct vop_intr *intr;
const struct vop_win_data *win;
unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
+ u64 feature;
};
/* interrupt define */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 0da4444..bafd698 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -275,6 +275,7 @@
static const struct vop_data rk3288_vop = {
.init_table = rk3288_init_reg_table,
.table_size = ARRAY_SIZE(rk3288_init_reg_table),
+ .feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3288_vop_intr,
.ctrl = &rk3288_ctrl_data,
.win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@
static const struct vop_data rk3399_vop_big = {
.init_table = rk3399_init_reg_table,
.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+ .feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3399_vop_intr,
.ctrl = &rk3399_ctrl_data,
/*
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9a1e34e..81f86a6 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -451,18 +451,6 @@
#ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *
-tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
-{
- struct tegra_drm_context *context;
-
- mutex_lock(&file->lock);
- context = idr_find(&file->contexts, id);
- mutex_unlock(&file->lock);
-
- return context;
-}
-
static int tegra_gem_create(struct drm_device *drm, void *data,
struct drm_file *file)
{
@@ -551,7 +539,7 @@
if (err < 0)
return err;
- err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+ err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
return err;
@@ -606,7 +594,7 @@
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -EINVAL;
goto unlock;
@@ -631,7 +619,7 @@
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -660,7 +648,7 @@
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -685,7 +673,7 @@
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
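The tegra hunks make idr_alloc() start at 1 and look contexts up with a plain idr_find() under fpriv->lock, so id 0 can never name a live context and a zero-filled ioctl argument simply fails the lookup. A minimal sketch of reserving 0 as the "no object" id in a toy table (not the kernel IDR API):

#include <stdio.h>

#define MAX_CTX 8

static void *slots[MAX_CTX];	/* slot 0 is intentionally never handed out */

static int ctx_alloc(void *obj)
{
	for (int id = 1; id < MAX_CTX; id++) {	/* start at 1, like idr_alloc(..., 1, 0, ...) */
		if (!slots[id]) {
			slots[id] = obj;
			return id;
		}
	}
	return -1;
}

static void *ctx_find(int id)
{
	if (id <= 0 || id >= MAX_CTX)
		return NULL;		/* id 0 (or garbage) never resolves */
	return slots[id];
}

int main(void)
{
	int dummy = 42;
	int id = ctx_alloc(&dummy);

	printf("allocated id %d, lookup(0) = %p, lookup(%d) = %p\n",
	       id, ctx_find(0), id, ctx_find(id));
	return 0;
}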
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 130d51c..4b948fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,9 +41,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806..a1c68e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
+ if (!fifo_state->dynamic_buffer)
+ goto out_err;
return fifo_state->dynamic_buffer;
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2..1d2db5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -274,108 +274,6 @@
}
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_surface *surface = NULL;
- struct vmw_dma_buffer *dmabuf = NULL;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + fb->hot_x;
- hotspot_y = du->hotspot_y + fb->hot_y;
-
- /* A lot of the code assumes this */
- if (crtc_w != 64 || crtc_h != 64) {
- ret = -EINVAL;
- goto out;
- }
-
- if (vmw_framebuffer_to_vfb(fb)->dmabuf)
- dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
- else
- surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
- if (surface && !surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* setup new image */
- ret = 0;
- if (surface) {
- /* vmw_user_surface_lookup takes one reference */
- du->cursor_surface = surface;
-
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
- 64, 64, hotspot_x, hotspot_y);
- } else if (dmabuf) {
- /* vmw_user_surface_lookup takes one reference */
- du->cursor_dmabuf = dmabuf;
-
- ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
- hotspot_x, hotspot_y);
- } else {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- goto out;
- }
-
- if (!ret) {
- du->cursor_x = crtc_x + du->set_gui_x;
- du->cursor_y = crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
- }
-
-out:
- return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
- if (plane->fb) {
- drm_framebuffer_unreference(plane->fb);
- plane->fb = NULL;
- }
-
- return -EINVAL;
-}
-
-
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -473,18 +371,6 @@
void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
- drm_atomic_set_fb_for_plane(plane->state, NULL);
- vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
-void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -1498,6 +1384,7 @@
*/
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
dmabuf && only_2d &&
+ mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
dmabuf, &surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 13f2f1d2..5f8d678 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -256,10 +256,6 @@
u16 *r, u16 *g, u16 *b,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height,
- int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
@@ -339,15 +335,6 @@
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@
struct drm_plane_state *state);
void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state);
int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index bad31bd..50be1f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -56,6 +56,8 @@
* @right: Right side of bounding box.
* @top: Top side of bounding box.
* @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
* @buf: DMA buffer when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
@@ -63,6 +65,7 @@
struct vmw_kms_dirty base;
SVGA3dTransferType transfer;
s32 left, right, top, bottom;
+ s32 fb_left, fb_top;
u32 pitch;
union {
struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@
*
* @dirty: The closure structure.
*
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
*/
static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
{
@@ -656,11 +659,19 @@
dirty->num_hits = 1;
- /* Calculate bounding box */
+ /* Calculate destination bounding box */
ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+ /*
+ * Calculate content bounding box. We only need the top-left
+ * coordinate because width and height will be the same as the
+ * destination bounding box above
+ */
+ ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+ ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y);
}
@@ -697,11 +708,11 @@
/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
src = ttm_kmap_obj_virtual(&stdu->host_map, ¬_used);
- src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+ src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used);
- dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+ dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
/* Figure out the real direction */
@@ -760,7 +771,7 @@
}
out_cleanup:
- ddirty->left = ddirty->top = S32_MAX;
+ ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
}
@@ -812,6 +823,7 @@
SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
ddirty.right = ddirty.bottom = S32_MIN;
+ ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@
DRM_ERROR("Failed to bind surface to STDU.\n");
else
crtc->primary->fb = plane->state->fb;
+
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+
+ if (ret)
+ DRM_ERROR("Failed to update STDU.\n");
}
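The STDU dirty-tracking changes accumulate two boxes per commit: the destination bounds via min/max as before, plus the top-left of the content (framebuffer) box, since its width and height match the destination box. A small sketch of that accumulation over a couple of clips; the struct and field names are invented for the demo:

#include <limits.h>
#include <stdio.h>

struct clip { int unit_x1, unit_y1, unit_x2, unit_y2, fb_x, fb_y; };

struct dirty {
	int left, top, right, bottom;	/* destination bounding box */
	int fb_left, fb_top;		/* top-left of the content box */
};

static void dirty_init(struct dirty *d)
{
	d->left = d->top = d->fb_left = d->fb_top = INT_MAX;
	d->right = d->bottom = INT_MIN;
}

static void dirty_add_clip(struct dirty *d, const struct clip *c)
{
	if (c->unit_x1 < d->left)   d->left = c->unit_x1;
	if (c->unit_y1 < d->top)    d->top = c->unit_y1;
	if (c->unit_x2 > d->right)  d->right = c->unit_x2;
	if (c->unit_y2 > d->bottom) d->bottom = c->unit_y2;
	/* Width/height match the destination box, so only track the origin. */
	if (c->fb_x < d->fb_left)   d->fb_left = c->fb_x;
	if (c->fb_y < d->fb_top)    d->fb_top = c->fb_y;
}

int main(void)
{
	struct clip clips[] = {
		{ 100, 50, 200, 150, 10,  5 },
		{  80, 60, 180, 160,  0, 15 },
	};
	struct dirty d;

	dirty_init(&d);
	for (unsigned i = 0; i < sizeof(clips) / sizeof(clips[0]); i++)
		dirty_add_clip(&d, &clips[i]);

	printf("dst [%d,%d]-[%d,%d], content origin (%d,%d)\n",
	       d.left, d.top, d.right, d.bottom, d.fb_left, d.fb_top);
	return 0;
}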
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341..6b70bd2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1274,11 +1274,14 @@
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- uint32_t backup_handle;
+ uint32_t backup_handle = 0;
if (req->multisample_count != 0)
return -EINVAL;
+ if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
@@ -1314,12 +1317,16 @@
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
- if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer is too small.\n");
+ vmw_dmabuf_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->buffer_handle;
+ }
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@
dev_priv->stdu_max_height);
if (size.width > max_width || size.height > max_height) {
- DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+ DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
size.width, size.height,
max_width, max_height);
return -EINVAL;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f05ebb1..ac65f52 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -172,7 +172,7 @@
host->rst = devm_reset_control_get(&pdev->dev, "host1x");
if (IS_ERR(host->rst)) {
- err = PTR_ERR(host->clk);
+ err = PTR_ERR(host->rst);
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 16d5568..2fb5f43 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -725,15 +725,16 @@
spin_lock_irqsave(&ipu->lock, flags);
val = ipu_cm_read(ipu, IPU_CONF);
- if (vdi) {
+ if (vdi)
val |= IPU_CONF_IC_INPUT;
- } else {
+ else
val &= ~IPU_CONF_IC_INPUT;
- if (csi_id == 1)
- val |= IPU_CONF_CSI_SEL;
- else
- val &= ~IPU_CONF_CSI_SEL;
- }
+
+ if (csi_id == 1)
+ val |= IPU_CONF_CSI_SEL;
+ else
+ val &= ~IPU_CONF_CSI_SEL;
+
ipu_cm_write(ipu, val, IPU_CONF);
spin_unlock_irqrestore(&ipu->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c555633..c35f74c 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -131,8 +131,6 @@
if (pre->in_use)
return -EBUSY;
- clk_prepare_enable(pre->clk_axi);
-
/* first get the engine out of reset and remove clock gating */
writel(0, pre->regs + IPU_PRE_CTRL);
@@ -149,12 +147,7 @@
void ipu_pre_put(struct ipu_pre *pre)
{
- u32 val;
-
- val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
- writel(val, pre->regs + IPU_PRE_CTRL);
-
- clk_disable_unprepare(pre->clk_axi);
+ writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
pre->in_use = false;
}
@@ -249,6 +242,8 @@
if (!pre->buffer_virt)
return -ENOMEM;
+ clk_prepare_enable(pre->clk_axi);
+
pre->dev = dev;
platform_set_drvdata(pdev, pre);
mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@
available_pres--;
mutex_unlock(&ipu_pre_list_mutex);
+ clk_disable_unprepare(pre->clk_axi);
+
if (pre->buffer_virt)
gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 04cee65..6e04069 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -826,11 +826,35 @@
* hid-rmi should take care of them,
* not hid-generic
*/
- if (IS_ENABLED(CONFIG_HID_RMI))
- hid->group = HID_GROUP_RMI;
+ hid->group = HID_GROUP_RMI;
break;
}
+ /* fall back to the generic driver in case the specific driver doesn't exist */
+ switch (hid->group) {
+ case HID_GROUP_MULTITOUCH_WIN_8:
+ /* fall-through */
+ case HID_GROUP_MULTITOUCH:
+ if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
+ hid->group = HID_GROUP_GENERIC;
+ break;
+ case HID_GROUP_SENSOR_HUB:
+ if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
+ hid->group = HID_GROUP_GENERIC;
+ break;
+ case HID_GROUP_RMI:
+ if (!IS_ENABLED(CONFIG_HID_RMI))
+ hid->group = HID_GROUP_GENERIC;
+ break;
+ case HID_GROUP_WACOM:
+ if (!IS_ENABLED(CONFIG_HID_WACOM))
+ hid->group = HID_GROUP_GENERIC;
+ break;
+ case HID_GROUP_LOGITECH_DJ_DEVICE:
+ if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
+ hid->group = HID_GROUP_GENERIC;
+ break;
+ }
vfree(parser);
return 0;
}
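The switch added above sends a device back to HID_GROUP_GENERIC whenever the specialist driver for its group is not built, using IS_ENABLED(CONFIG_...). A compile-time sketch of the same fallback with a plain #ifdef and an invented option name (the kernel macro also handles =m, which this does not):

#include <stdio.h>

enum hid_group { GROUP_GENERIC, GROUP_MULTITOUCH, GROUP_RMI };

/* Build with -DDRIVER_RMI_ENABLED to keep the RMI group; without it the
 * device falls back to the generic handler. */
static enum hid_group pick_group(enum hid_group detected)
{
	switch (detected) {
	case GROUP_RMI:
#ifndef DRIVER_RMI_ENABLED
		return GROUP_GENERIC;
#endif
		break;
	default:
		break;
	}
	return detected;
}

int main(void)
{
	printf("RMI device handled by group %d\n", pick_group(GROUP_RMI));
	return 0;
}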
@@ -1763,15 +1787,23 @@
* used as a driver. See hid_scan_report().
*/
static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1792,11 +1824,6 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
@@ -1851,62 +1878,100 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+#if IS_ENABLED(CONFIG_HID_ELECOM)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+ { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+ { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
@@ -1915,12 +1980,17 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
@@ -1930,21 +2000,29 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
#if IS_ENABLED(CONFIG_HID_LENOVO)
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1957,7 +2035,6 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@@ -1969,17 +2046,30 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@@ -1995,9 +2085,22 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
{ HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@@ -2017,13 +2120,41 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+#endif
#if IS_ENABLED(CONFIG_HID_ROCCAT)
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -2051,9 +2182,21 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
#endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2072,9 +2215,17 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2083,12 +2234,25 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -2096,20 +2260,17 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2117,19 +2278,18 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+#endif
{ }
};
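The hid-core hunks above all follow one pattern: each block of hid_have_special_driver entries and each group fallback is guarded with IS_ENABLED(), so a device is only claimed by a specific driver when that driver is actually built in or as a module. A minimal sketch of the pattern, with a hypothetical CONFIG_HID_EXAMPLE option and example_ids table that are not part of the patch:

#include <linux/kconfig.h>
#include <linux/hid.h>

static const struct hid_device_id example_ids[] = {
#if IS_ENABLED(CONFIG_HID_EXAMPLE)	/* entry exists only when the driver is y/m */
	{ HID_USB_DEVICE(0x1234, 0x5678) },
#endif
	{ }
};

static void example_pick_group(struct hid_device *hdev)
{
	/* runtime form: both branches are type-checked, the dead one is dropped */
	if (!IS_ENABLED(CONFIG_HID_EXAMPLE))
		hdev->group = HID_GROUP_GENERIC;
}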
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 26b0510..93d28c0 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1066,7 +1066,7 @@
dev->addr_len = 1;
dev->tx_queue_len = SSIP_TXQUEUE_LEN;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->header_ops = &phonet_header_ops;
}
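The ssi_protocol hunk is part of the tree-wide conversion away from dev->destructor: setting needs_free_netdev tells the networking core to call free_netdev() itself after unregistration. A minimal sketch of the converted setup callback (hypothetical name):

#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	/* was: dev->destructor = free_netdev; the core now frees the netdev */
	dev->needs_free_netdev = true;
}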
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 22d5eaf..5ef2814 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -343,6 +343,7 @@
config SENSORS_ASPEED
tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+ select REGMAP
help
This driver provides support for ASPEED AST2400/AST2500 PWM
and Fan Tacho controllers.
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 48403a2..9de13d6 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -494,7 +495,7 @@
return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
}
-static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
u8 fan_tach_ch)
{
u32 raw_data, tach_div, clk_source, sec, val;
@@ -510,6 +511,9 @@
msleep(sec);
regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
+ if (!(val & RESULT_STATUS_MASK))
+ return -ETIMEDOUT;
+
raw_data = val & RESULT_VALUE_MASK;
tach_div = priv->type_fan_tach_clock_division[type];
tach_div = 0x4 << (tach_div * 2);
@@ -561,12 +565,14 @@
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int index = sensor_attr->index;
- u32 rpm;
+ int rpm;
struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+ if (rpm < 0)
+ return rpm;
- return sprintf(buf, "%u\n", rpm);
+ return sprintf(buf, "%d\n", rpm);
}
static umode_t pwm_is_visible(struct kobject *kobj,
@@ -591,24 +597,23 @@
return a->mode;
}
-static SENSOR_DEVICE_ATTR(pwm0, 0644,
- show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm1, 0644,
- show_pwm, set_pwm, 1);
+ show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, 0644,
- show_pwm, set_pwm, 2);
+ show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, 0644,
- show_pwm, set_pwm, 3);
+ show_pwm, set_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, 0644,
- show_pwm, set_pwm, 4);
+ show_pwm, set_pwm, 3);
static SENSOR_DEVICE_ATTR(pwm5, 0644,
- show_pwm, set_pwm, 5);
+ show_pwm, set_pwm, 4);
static SENSOR_DEVICE_ATTR(pwm6, 0644,
- show_pwm, set_pwm, 6);
+ show_pwm, set_pwm, 5);
static SENSOR_DEVICE_ATTR(pwm7, 0644,
+ show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
show_pwm, set_pwm, 7);
static struct attribute *pwm_dev_attrs[] = {
- &sensor_dev_attr_pwm0.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
@@ -616,6 +621,7 @@
&sensor_dev_attr_pwm5.dev_attr.attr,
&sensor_dev_attr_pwm6.dev_attr.attr,
&sensor_dev_attr_pwm7.dev_attr.attr,
+ &sensor_dev_attr_pwm8.dev_attr.attr,
NULL,
};
@@ -624,40 +630,39 @@
.is_visible = pwm_is_visible,
};
-static SENSOR_DEVICE_ATTR(fan0_input, 0444,
- show_rpm, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, 0444,
- show_rpm, NULL, 1);
+ show_rpm, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, 0444,
- show_rpm, NULL, 2);
+ show_rpm, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, 0444,
- show_rpm, NULL, 3);
+ show_rpm, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, 0444,
- show_rpm, NULL, 4);
+ show_rpm, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_input, 0444,
- show_rpm, NULL, 5);
+ show_rpm, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_input, 0444,
- show_rpm, NULL, 6);
+ show_rpm, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_input, 0444,
- show_rpm, NULL, 7);
+ show_rpm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_input, 0444,
- show_rpm, NULL, 8);
+ show_rpm, NULL, 7);
static SENSOR_DEVICE_ATTR(fan9_input, 0444,
- show_rpm, NULL, 9);
+ show_rpm, NULL, 8);
static SENSOR_DEVICE_ATTR(fan10_input, 0444,
- show_rpm, NULL, 10);
+ show_rpm, NULL, 9);
static SENSOR_DEVICE_ATTR(fan11_input, 0444,
- show_rpm, NULL, 11);
+ show_rpm, NULL, 10);
static SENSOR_DEVICE_ATTR(fan12_input, 0444,
- show_rpm, NULL, 12);
+ show_rpm, NULL, 11);
static SENSOR_DEVICE_ATTR(fan13_input, 0444,
- show_rpm, NULL, 13);
+ show_rpm, NULL, 12);
static SENSOR_DEVICE_ATTR(fan14_input, 0444,
- show_rpm, NULL, 14);
+ show_rpm, NULL, 13);
static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+ show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
show_rpm, NULL, 15);
static struct attribute *fan_dev_attrs[] = {
- &sensor_dev_attr_fan0_input.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
@@ -673,6 +678,7 @@
&sensor_dev_attr_fan13_input.dev_attr.attr,
&sensor_dev_attr_fan14_input.dev_attr.attr,
&sensor_dev_attr_fan15_input.dev_attr.attr,
+ &sensor_dev_attr_fan16_input.dev_attr.attr,
NULL
};
@@ -802,7 +808,6 @@
if (ret)
return ret;
}
- of_node_put(np);
priv->groups[0] = &pwm_dev_group;
priv->groups[1] = &fan_dev_group;
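Two things happen in the aspeed-pwm-tacho hunks: the sysfs attributes are renumbered because the hwmon convention names channels starting at 1 (fan1_input, pwm1), and a tach measurement that never completes is now reported as -ETIMEDOUT instead of a bogus RPM. Returning a negative errno from a show() callback makes the corresponding read(2) fail with that error, as in this minimal sketch (hypothetical device, not taken from the driver):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static ssize_t example_show_rpm(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int rpm = -ETIMEDOUT;		/* pretend the measurement never completed */

	if (rpm < 0)
		return rpm;		/* userspace read() fails with ETIMEDOUT */
	return sprintf(buf, "%d\n", rpm);
}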
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f573448..e98e44e 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -584,7 +584,7 @@
/* unmap the data buffer */
if (dma_size != 0)
- dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+ dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) {
dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 214bf28..8be3e6c 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -319,7 +319,7 @@
rcar_i2c_write(priv, ICFBSCR, TCYC06);
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
- priv->msg->len, priv->dma_direction);
+ sg_dma_len(&priv->sg), priv->dma_direction);
priv->dma_direction = DMA_NONE;
}
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 21d38c8..7f4f9c4 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -143,7 +143,7 @@
iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
}
-static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
{
u32 channel_intr_status;
u32 intr_status;
@@ -167,7 +167,7 @@
return IRQ_NONE;
}
-static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
{
irqreturn_t retval = IRQ_NONE;
struct iproc_adc_priv *adc_priv;
@@ -181,7 +181,7 @@
adc_priv = iio_priv(indio_dev);
regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
- dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+ dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
intr_status);
intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@@ -566,8 +566,8 @@
}
ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
- iproc_adc_interrupt_thread,
iproc_adc_interrupt_handler,
+ iproc_adc_interrupt_thread,
IRQF_SHARED, "iproc-adc", indio_dev);
if (ret) {
dev_err(&pdev->dev, "request_irq error %d\n", ret);
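The bcm_iproc_adc change swaps the two callbacks passed to devm_request_threaded_irq() so that each handler ends up in the slot matching its context: the third argument runs in hard-IRQ context, the fourth in a kernel thread. A minimal sketch of that argument order (names are illustrative):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_hardirq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;		/* defer the heavy work to the thread */
}

static irqreturn_t example_thread(int irq, void *data)
{
	return IRQ_HANDLED;		/* runs in process context, may sleep */
}

static int example_request(struct device *dev, unsigned int irq, void *priv)
{
	return devm_request_threaded_irq(dev, irq,
					 example_hardirq,	/* hard-IRQ handler */
					 example_thread,	/* threaded handler */
					 IRQF_SHARED, "example", priv);
}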
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index ec82106..b0526e4 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -438,10 +438,10 @@
struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev));
unsigned int i, r;
- i = max9611->shunt_resistor_uohm / 1000;
- r = max9611->shunt_resistor_uohm % 1000;
+ i = max9611->shunt_resistor_uohm / 1000000;
+ r = max9611->shunt_resistor_uohm % 1000000;
- return sprintf(buf, "%u.%03u\n", i, r);
+ return sprintf(buf, "%u.%06u\n", i, r);
}
static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444,
@@ -536,8 +536,8 @@
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611));
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
+ if (!indio_dev)
+ return -ENOMEM;
i2c_set_clientdata(client, indio_dev);
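The max9611 hunks fix two independent problems: the shunt resistance is stored in micro-ohms, so converting it to ohms for sysfs needs a divisor of 1000000, and devm_iio_device_alloc() reports failure by returning NULL rather than an ERR_PTR. A worked example of the unit fix, assuming an illustrative 5 milliohm shunt (shunt_resistor_uohm = 5000):

	old: 5000 / 1000    = 5, 5000 % 1000    = 0    -> "5.000"    (a thousand times too large)
	new: 5000 / 1000000 = 0, 5000 % 1000000 = 5000 -> "0.005000" (correct value in ohms)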
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index dd4190b..6066bbf 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -468,13 +468,13 @@
static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- int count;
+ unsigned int count, tmp;
for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
if (!meson_sar_adc_get_fifo_count(indio_dev))
break;
- regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0);
+ regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
}
}
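The meson_saradc fix matters because regmap_read() returns the register value through its third argument; passing a literal 0 hands it a NULL pointer and the read is lost. A minimal sketch of the corrected drain step (map and reg are placeholders):

#include <linux/regmap.h>

static void example_drain_one(struct regmap *map, unsigned int reg)
{
	unsigned int tmp;

	/* the value must land somewhere, even if it is only discarded */
	regmap_read(map, reg, &tmp);
}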
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index b0c7d8e..6888167 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -718,9 +718,12 @@
adc->dev = dev;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
- if (IS_ERR(adc->base))
- return PTR_ERR(adc->base);
+ if (!adc->base)
+ return -ENOMEM;
init_completion(&adc->completion);
spin_lock_init(&adc->lock);
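The mxs-lradc-adc hunk corrects two error checks: platform_get_resource() and devm_ioremap() both signal failure with NULL, not with an ERR_PTR, so the old IS_ERR()/PTR_ERR() tests could never trigger. A minimal sketch of the corrected probe-time mapping (hypothetical helper):

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)			/* missing memory resource */
		return -EINVAL;

	*base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!*base)			/* NULL, not an ERR_PTR, on failure */
		return -ENOMEM;

	return 0;
}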
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index b235273..81d4c39 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -105,6 +105,8 @@
bool no_irq;
/* prevents concurrent reads of temperature and ADC */
struct mutex mutex;
+ struct thermal_zone_device *tzd;
+ struct device *sensor_device;
};
#define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \
@@ -502,7 +504,6 @@
{
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
const struct of_device_id *of_dev;
- struct thermal_zone_device *tzd;
struct resource *mem;
void __iomem *base;
int ret;
@@ -532,13 +533,14 @@
if (!IS_ENABLED(CONFIG_THERMAL_OF))
return 0;
- tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info,
- &sun4i_ts_tz_ops);
- if (IS_ERR(tzd))
+ info->sensor_device = &pdev->dev;
+ info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
+ info, &sun4i_ts_tz_ops);
+ if (IS_ERR(info->tzd))
dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
- PTR_ERR(tzd));
+ PTR_ERR(info->tzd));
- return PTR_ERR_OR_ZERO(tzd);
+ return PTR_ERR_OR_ZERO(info->tzd);
}
static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -584,15 +586,15 @@
* of_node, and the device from this driver as third argument to
* return the temperature.
*/
- struct thermal_zone_device *tzd;
- tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0,
- info,
- &sun4i_ts_tz_ops);
- if (IS_ERR(tzd)) {
+ info->sensor_device = pdev->dev.parent;
+ info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
+ 0, info,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(info->tzd)) {
dev_err(&pdev->dev,
"could not register thermal sensor: %ld\n",
- PTR_ERR(tzd));
- return PTR_ERR(tzd);
+ PTR_ERR(info->tzd));
+ return PTR_ERR(info->tzd);
}
} else {
indio_dev->num_channels =
@@ -688,7 +690,13 @@
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF))
+
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return 0;
+
+ thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
+
+ if (!info->no_irq)
iio_map_array_unregister(indio_dev);
return 0;
@@ -700,6 +708,7 @@
{ "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id);
static struct platform_driver sun4i_gpadc_driver = {
.driver = {
@@ -711,6 +720,7 @@
.probe = sun4i_gpadc_probe,
.remove = sun4i_gpadc_remove,
};
+MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id);
module_platform_driver(sun4i_gpadc_driver);
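The sun4i-gpadc-iio hunks drop the devm_ variant of the thermal sensor registration and keep the zone pointer in the driver state, so remove() can unregister the sensor explicitly before the rest of the teardown; the new MODULE_DEVICE_TABLE() lines additionally let the module be autoloaded. A minimal sketch of the non-devm register/unregister pairing (names are illustrative):

#include <linux/err.h>
#include <linux/thermal.h>

static int example_register(struct device *dev, void *data,
			    const struct thermal_zone_of_device_ops *ops,
			    struct thermal_zone_device **tzd)
{
	*tzd = thermal_zone_of_sensor_register(dev, 0, data, ops);
	return PTR_ERR_OR_ZERO(*tzd);
}

static void example_unregister(struct device *dev,
			       struct thermal_zone_device *tzd)
{
	/* called from remove(), before the resources the sensor uses go away */
	thermal_zone_of_sensor_unregister(dev, tzd);
}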
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 4282cec..6cbed7e 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -614,7 +614,7 @@
return -EINVAL;
}
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
if (indio_dev == NULL) {
dev_err(&pdev->dev, "failed to allocate iio device\n");
return -ENOMEM;
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index dd99d27..ff03324 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 9fabed4..2b5a320 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -14,6 +14,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 96dabbd..88a7c5d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@
static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
+ .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
.user_ctrl = INV_MPU6050_REG_USER_CTRL,
.fifo_en = INV_MPU6050_REG_FIFO_EN,
.gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@@ -211,6 +212,37 @@
EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
/**
+ * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ * MPU60xx/MPU9150 use only one register for the accelerometer and gyroscope;
+ * MPU6500 and above have a dedicated register for the accelerometer.
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+ enum inv_mpu6050_filter_e val)
+{
+ int result;
+
+ result = regmap_write(st->map, st->reg->lpf, val);
+ if (result)
+ return result;
+
+ switch (st->chip_type) {
+ case INV_MPU6050:
+ case INV_MPU6000:
+ case INV_MPU9150:
+ /* old chips, nothing to do */
+ result = 0;
+ break;
+ default:
+ /* set accel lpf */
+ result = regmap_write(st->map, st->reg->accel_lpf, val);
+ break;
+ }
+
+ return result;
+}
+
+/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
*
* Initial configuration:
@@ -233,8 +265,7 @@
if (result)
return result;
- d = INV_MPU6050_FILTER_20HZ;
- result = regmap_write(st->map, st->reg->lpf, d);
+ result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
return result;
@@ -537,6 +568,8 @@
* would be aliasing. This function basically searches for the
* correct low pass parameters based on the FIFO rate, e.g.
* the sampling frequency.
+ *
+ * The LPF is set automatically when the sampling rate is set, to avoid aliasing.
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
@@ -552,7 +585,7 @@
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
- result = regmap_write(st->map, st->reg->lpf, data);
+ result = inv_mpu6050_set_lpf_regs(st, data);
if (result)
return result;
st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index ef13de7..953a0c0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
* struct inv_mpu6050_reg_map - Notable registers.
* @sample_rate_div: Divider applied to gyro output rate.
* @lpf: Configures internal low pass filter.
+ * @accel_lpf: Configures accelerometer low pass filter.
* @user_ctrl: Enables/resets the FIFO.
* @fifo_en: Determines which data will appear in FIFO.
* @gyro_config: gyro config register.
@@ -47,6 +48,7 @@
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
u8 lpf;
+ u8 accel_lpf;
u8 user_ctrl;
u8 fifo_en;
u8 gyro_config;
@@ -188,6 +190,7 @@
#define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
/* delay time in milliseconds */
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 978e1592..4061fed 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -451,7 +451,8 @@
return len;
out_trigger_put:
- iio_trigger_put(trig);
+ if (trig)
+ iio_trigger_put(trig);
return ret;
}
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b30e0c1..67838ed 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@
static const struct reg_field reg_field_it =
REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
static const struct reg_field reg_field_als_intr =
- REG_FIELD(LTR501_INTR, 0, 0);
-static const struct reg_field reg_field_ps_intr =
REG_FIELD(LTR501_INTR, 1, 1);
+static const struct reg_field reg_field_ps_intr =
+ REG_FIELD(LTR501_INTR, 0, 0);
static const struct reg_field reg_field_als_rate =
REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
static const struct reg_field reg_field_ps_rate =
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index ddf9bee..aa4df0d 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
#define AS3935_AFE_PWR_BIT BIT(0)
#define AS3935_INT 0x03
-#define AS3935_INT_MASK 0x07
+#define AS3935_INT_MASK 0x0f
#define AS3935_EVENT_INT BIT(3)
-#define AS3935_NOISE_INT BIT(1)
+#define AS3935_NOISE_INT BIT(0)
#define AS3935_DATA 0x07
#define AS3935_DATA_MASK 0x3F
@@ -215,7 +215,7 @@
st->buffer[0] = val & AS3935_DATA_MASK;
iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
- pf->timestamp);
+ iio_get_time_ns(indio_dev));
err_read:
iio_trigger_notify_done(indio_dev->trig);
@@ -244,7 +244,7 @@
switch (val) {
case AS3935_EVENT_INT:
- iio_trigger_poll(st->trig);
+ iio_trigger_poll_chained(st->trig);
break;
case AS3935_NOISE_INT:
dev_warn(&st->spi->dev, "noise level is too high\n");
@@ -269,8 +269,6 @@
static void calibrate_as3935(struct as3935_state *st)
{
- mutex_lock(&st->lock);
-
/* mask disturber interrupt bit */
as3935_write(st, AS3935_INT, BIT(5));
@@ -280,8 +278,6 @@
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
- mutex_unlock(&st->lock);
}
#ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@
val &= ~AS3935_AFE_PWR_BIT;
ret = as3935_write(st, AS3935_AFE_GAIN, val);
+ calibrate_as3935(st);
+
err_resume:
mutex_unlock(&st->lock);
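The as3935 interrupt handler runs in a threaded (process) context, which is why the hunk switches from iio_trigger_poll(), meant for hard-IRQ context, to iio_trigger_poll_chained(). A minimal sketch of the pattern (the trigger-pointer handling is illustrative only):

#include <linux/iio/trigger.h>
#include <linux/interrupt.h>

static irqreturn_t example_threaded_event(int irq, void *private)
{
	struct iio_trigger *trig = private;	/* illustrative only */

	iio_trigger_poll_chained(trig);		/* process-context variant */
	return IRQ_HANDLED;
}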
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 02971e2..ece6926 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -449,12 +449,7 @@
return ret;
rt = (struct rt6_info *)dst;
- if (ipv6_addr_any(&fl6.saddr)) {
- ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
- &fl6.daddr, 0, &fl6.saddr);
- if (ret)
- goto put;
-
+ if (ipv6_addr_any(&src_in->sin6_addr)) {
src_in->sin6_family = AF_INET6;
src_in->sin6_addr = fl6.saddr;
}
@@ -471,9 +466,6 @@
*pdst = dst;
return 0;
-put:
- dst_release(dst);
- return ret;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1844770..2b4d613 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1429,7 +1429,7 @@
primary_path->packet_life_time =
cm_req_get_primary_local_ack_timeout(req_msg);
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
- sa_path_set_service_id(primary_path, req_msg->service_id);
+ primary_path->service_id = req_msg->service_id;
if (req_msg->alt_local_lid) {
alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@
alt_path->packet_life_time =
cm_req_get_alt_local_ack_timeout(req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
- sa_path_set_service_id(alt_path, req_msg->service_id);
+ alt_path->service_id = req_msg->service_id;
}
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 91b7a2f..31bb82d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1140,7 +1140,7 @@
ib->sib_pkey = path->pkey;
ib->sib_flowinfo = path->flow_label;
memcpy(&ib->sib_addr, &path->sgid, 16);
- ib->sib_sid = sa_path_get_service_id(path);
+ ib->sib_sid = path->service_id;
ib->sib_scope_id = 0;
} else {
ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@
memcpy(&req->local_gid, &req_param->primary_path->sgid,
sizeof(req->local_gid));
req->has_gid = true;
- req->service_id =
- sa_path_get_service_id(req_param->primary_path);
+ req->service_id = req_param->primary_path->service_id;
req->pkey = be16_to_cpu(req_param->primary_path->pkey);
if (req->pkey != req_param->bth_pkey)
pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@
struct rdma_route *rt;
const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
- const __be64 service_id = sa_path_get_service_id(path);
+ const __be64 service_id =
+ ib_event->param.req_rcvd.primary_path->service_id;
int ret;
id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
- sa_path_set_service_id(&path_rec,
- rdma_get_service_id(&id_priv->id,
- cma_dst_addr(id_priv)));
+ path_rec.service_id = rdma_get_service_id(&id_priv->id,
+ cma_dst_addr(id_priv));
comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cb7d372..d92ab4e 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -169,6 +169,16 @@
int ib_sa_init(void);
void ib_sa_cleanup(void);
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * ibnl_chk_listeners() - Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative value if there are no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
struct netlink_callback *cb);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b784055..94931c4 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -37,6 +37,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
+#include "core_priv.h"
struct ibnl_client {
struct list_head list;
@@ -55,7 +56,6 @@
return -1;
return 0;
}
-EXPORT_SYMBOL(ibnl_chk_listeners);
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[])
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e335b09c..fb7aec4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -194,7 +194,7 @@
.field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
- { PATH_REC_FIELD(ib.service_id),
+ { PATH_REC_FIELD(service_id),
.offset_words = 0,
.offset_bits = 0,
.size_bits = 64 },
@@ -296,7 +296,7 @@
.field_name = "sa_path_rec:" #field
static const struct ib_field opa_path_rec_table[] = {
- { OPA_PATH_REC_FIELD(opa.service_id),
+ { OPA_PATH_REC_FIELD(service_id),
.offset_words = 0,
.offset_bits = 0,
.size_bits = 64 },
@@ -774,7 +774,7 @@
/* Now build the attributes */
if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
- val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+ val64 = be64_to_cpu(sa_rec->service_id);
nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
sizeof(val64), &val64);
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 3dbf811..21e60b1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -58,7 +58,7 @@
for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
page = sg_page(sg);
- if (umem->writable && dirty)
+ if (!PageDirty(page) && umem->writable && dirty)
set_page_dirty_lock(page);
put_page(page);
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 0780b1a..8c4ec56 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -321,11 +321,15 @@
struct vm_area_struct *vma;
struct hstate *h;
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, ib_umem_start(umem));
- if (!vma || !is_vm_hugetlb_page(vma))
+ if (!vma || !is_vm_hugetlb_page(vma)) {
+ up_read(&mm->mmap_sem);
return -EINVAL;
+ }
h = hstate_vma(vma);
umem->page_shift = huge_page_shift(h);
+ up_read(&mm->mmap_sem);
umem->hugetlb = 1;
} else {
umem->hugetlb = 0;
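The umem_odp hunk closes a locking gap: find_vma() is only safe under mmap_sem, and every exit path, including the error return, has to drop the semaphore again. A minimal sketch of that rule (hypothetical helper, hugetlb check as in the hunk):

#include <linux/hugetlb.h>
#include <linux/mm.h>

static bool example_is_hugetlb(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool huge;

	down_read(&mm->mmap_sem);		/* find_vma() requires mmap_sem */
	vma = find_vma(mm, addr);
	huge = vma && is_vm_hugetlb_page(vma);
	up_read(&mm->mmap_sem);			/* dropped on every path */

	return huge;
}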
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 8b9587f..94fd989 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -96,11 +96,11 @@
}
EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
- struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+ struct sa_path_rec *src)
{
- memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
- memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+ memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+ memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
dst->dlid = htons(ntohl(sa_path_get_dlid(src)));
dst->slid = htons(ntohl(sa_path_get_slid(src)));
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8..0877283 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_UD_QP_HW_STALL 0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD 32
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7ba9e69..c7bd683 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,48 @@
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
+static int __from_ib_access_flags(int iflags)
+{
+ int qflags = 0;
+
+ if (iflags & IB_ACCESS_LOCAL_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_READ)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+ if (iflags & IB_ACCESS_REMOTE_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+ if (iflags & IB_ACCESS_MW_BIND)
+ qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+ if (iflags & IB_ZERO_BASED)
+ qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+ if (iflags & IB_ACCESS_ON_DEMAND)
+ qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+ return qflags;
+};
+
+static enum ib_access_flags __to_ib_access_flags(int qflags)
+{
+ enum ib_access_flags iflags = 0;
+
+ if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+ iflags |= IB_ACCESS_LOCAL_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+ iflags |= IB_ACCESS_REMOTE_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+ iflags |= IB_ACCESS_REMOTE_READ;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+ iflags |= IB_ACCESS_REMOTE_ATOMIC;
+ if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+ iflags |= IB_ACCESS_MW_BIND;
+ if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+ iflags |= IB_ZERO_BASED;
+ if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+ iflags |= IB_ACCESS_ON_DEMAND;
+ return iflags;
+};
+
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num)
{
@@ -149,8 +191,8 @@
ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah;
- ib_attr->max_fmr = dev_attr->max_fmr;
- ib_attr->max_map_per_fmr = 1; /* ? */
+ ib_attr->max_fmr = 0;
+ ib_attr->max_map_per_fmr = 0;
ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -410,6 +452,158 @@
return IB_LINK_LAYER_ETHERNET;
}
+#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
+
+static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct ib_mr *ib_mr = &fence->mr->ib_mr;
+ struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+
+ memset(wqe, 0, sizeof(*wqe));
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+ wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ wqe->bind.zero_based = false;
+ wqe->bind.parent_l_key = ib_mr->lkey;
+ wqe->bind.va = (u64)(unsigned long)fence->va;
+ wqe->bind.length = fence->size;
+ wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
+ wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
+
+ /* Save the initial rkey in fence structure for now;
+ * wqe->bind.r_key will be set at (re)bind time.
+ */
+ fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
+}
+
+static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
+{
+ struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ struct ib_pd *ib_pd = qp->ib_qp.pd;
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
+ struct bnxt_qplib_swqe wqe;
+ int rc;
+
+ memcpy(&wqe, fence_wqe, sizeof(wqe));
+ wqe.bind.r_key = fence->bind_rkey;
+ fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
+
+ dev_dbg(rdev_to_dev(qp->rdev),
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+ wqe.bind.r_key, qp->qplib_qp.id, pd);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ if (rc) {
+ dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+ return rc;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+
+ return rc;
+}
+
+static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = fence->mr;
+
+ if (fence->mw) {
+ bnxt_re_dealloc_mw(fence->mw);
+ fence->mw = NULL;
+ }
+ if (mr) {
+ if (mr->ib_mr.rkey)
+ bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
+ true);
+ if (mr->ib_mr.lkey)
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ kfree(mr);
+ fence->mr = NULL;
+ }
+ if (fence->dma_addr) {
+ dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ fence->dma_addr = 0;
+ }
+}
+
+static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+{
+ int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = NULL;
+ dma_addr_t dma_addr = 0;
+ struct ib_mw *mw;
+ u64 pbl_tbl;
+ int rc;
+
+ dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ rc = dma_mapping_error(dev, dma_addr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+ rc = -EIO;
+ fence->dma_addr = 0;
+ goto fail;
+ }
+ fence->dma_addr = dma_addr;
+
+ /* Allocate a MR */
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ fence->mr = mr;
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+ goto fail;
+ }
+
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ mr->qplib_mr.va = (u64)(unsigned long)fence->va;
+ mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
+ pbl_tbl = dma_addr;
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
+ BNXT_RE_FENCE_PBL_SIZE, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+ goto fail;
+ }
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
+ /* Create a fence MW only for kernel consumers */
+ mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
+ if (!mw) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create fence-MW for PD: %p\n", pd);
+ rc = -EINVAL;
+ goto fail;
+ }
+ fence->mw = mw;
+
+ bnxt_re_create_fence_wqe(pd);
+ return 0;
+
+fail:
+ bnxt_re_destroy_fence_mr(pd);
+ return rc;
+}
+
/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
@@ -417,6 +611,7 @@
struct bnxt_re_dev *rdev = pd->rdev;
int rc;
+ bnxt_re_destroy_fence_mr(pd);
if (ib_pd->uobject && pd->dpi.dbr) {
struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
struct bnxt_re_ucontext *ucntx;
@@ -498,6 +693,10 @@
}
}
+ if (!udata)
+ if (bnxt_re_create_fence_mr(pd))
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to create Fence-MR\n");
return &pd->ib_pd;
dbfail:
(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -849,12 +1048,16 @@
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
+	/* Q full delta can be 1 since it is an internal QP */
+ qp->qplib_qp.sq.q_full_delta = 1;
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+	/* Q full delta can be 1 since it is an internal QP */
+ qp->qplib_qp.rq.q_full_delta = 1;
qp->qplib_qp.mtu = qp1_qp->mtu;
@@ -917,10 +1120,6 @@
qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
IB_SIGNAL_ALL_WR) ? true : false);
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
-
qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@@ -959,6 +1158,9 @@
qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_init_attr->cap.max_recv_wr;
+
qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -967,6 +1169,12 @@
qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
if (qp_init_attr->qp_type == IB_QPT_GSI) {
+ /* Allocate 1 more than what's provided */
+ entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
+ qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+ qp_init_attr->cap.max_send_wr;
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -1006,6 +1214,22 @@
}
} else {
+ /* Allocate 128 + 1 more than what's provided */
+ entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+
+ /*
+	 * Reserve one slot for the phantom WQE. The application can
+	 * post one extra entry in this case, but we allow it to avoid
+	 * an unexpected queue-full condition.
+ */
+
+ qp->qplib_qp.sq.q_full_delta -= 1;
+
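A worked example of the sizing above, assuming BNXT_QPLIB_RESERVED_QP_WRS is 128 as the "128 + 1" comment implies: for cap.max_send_wr = 100,

	entries      = roundup_pow_of_two(100 + 128 + 1) = 256
	sq.max_wqe   = min(256, dev_attr->max_qp_wqes + 128 + 1)
	q_full_delta = (128 + 1) - 1 = 128

so bnxt_qplib_queue_full() (see the qplib_fp.h hunk below) starts reporting the SQ as full while 128 slots are still unused, keeping the HW-reserved WQEs plus the single phantom slot available.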
qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
if (udata) {
@@ -1025,6 +1249,7 @@
qp->ib_qp.qp_num = qp->qplib_qp.id;
spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
if (udata) {
struct bnxt_re_qp_resp resp;
@@ -1129,48 +1354,6 @@
}
}
-static int __from_ib_access_flags(int iflags)
-{
- int qflags = 0;
-
- if (iflags & IB_ACCESS_LOCAL_WRITE)
- qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
- if (iflags & IB_ACCESS_REMOTE_READ)
- qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
- if (iflags & IB_ACCESS_REMOTE_WRITE)
- qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
- if (iflags & IB_ACCESS_REMOTE_ATOMIC)
- qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
- if (iflags & IB_ACCESS_MW_BIND)
- qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
- if (iflags & IB_ZERO_BASED)
- qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
- if (iflags & IB_ACCESS_ON_DEMAND)
- qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
- return qflags;
-};
-
-static enum ib_access_flags __to_ib_access_flags(int qflags)
-{
- enum ib_access_flags iflags = 0;
-
- if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
- iflags |= IB_ACCESS_LOCAL_WRITE;
- if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
- iflags |= IB_ACCESS_REMOTE_WRITE;
- if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
- iflags |= IB_ACCESS_REMOTE_READ;
- if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
- iflags |= IB_ACCESS_REMOTE_ATOMIC;
- if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
- iflags |= IB_ACCESS_MW_BIND;
- if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
- iflags |= IB_ZERO_BASED;
- if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
- iflags |= IB_ACCESS_ON_DEMAND;
- return iflags;
-};
-
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
@@ -1378,11 +1561,21 @@
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+ qp_attr->cap.max_send_wr;
+ /*
+	 * Reserve one slot for the phantom WQE. Some applications can
+	 * post one extra entry in this case; allow it to avoid an
+	 * unexpected queue-full condition.
+ */
+ qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
if (qp->qplib_qp.rq.max_wqe) {
entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
} else {
/* SRQ was used prior, just ignore the RQ caps */
@@ -1883,6 +2076,22 @@
return payload_sz;
}
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+ if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+ qp->ib_qp.qp_type == IB_QPT_GSI ||
+ qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+ qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+ int qp_attr_mask;
+ struct ib_qp_attr qp_attr;
+
+ qp_attr_mask = IB_QP_STATE;
+ qp_attr.qp_state = IB_QPS_RTS;
+ bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+ qp->qplib_qp.wqe_cnt = 0;
+ }
+}
+
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
struct ib_send_wr *wr)
@@ -1928,6 +2137,7 @@
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2024,6 +2234,7 @@
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
@@ -2071,7 +2282,10 @@
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
+ unsigned long flags;
+ u32 count = 0;
+ spin_lock_irqsave(&qp->rq_lock, flags);
while (wr) {
/* House keeping */
memset(&wqe, 0, sizeof(wqe));
@@ -2100,9 +2314,21 @@
*bad_wr = wr;
break;
}
+
+		/* Ring the DB once the posted RQEs reach the threshold */
+ if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ count = 0;
+ }
+
wr = wr->next;
}
- bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+ if (count)
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+ spin_unlock_irqrestore(&qp->rq_lock, flags);
+
return rc;
}
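The receive path now batches doorbells instead of ringing once per bnxt_re_post_recv() call. A hypothetical trace, assuming a threshold of 16 purely for illustration (BNXT_RE_RQ_WQE_THRESHOLD is defined outside this hunk):

	post 35 RQEs  ->  DB rung in-loop after RQE 16 and RQE 32
	loop exit     ->  count == 3, one final DB for the remainder

so the tail of a large batch is never left without a doorbell.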
@@ -2643,12 +2869,36 @@
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
+static int send_phantom_wqe(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ rc = bnxt_re_bind_fence_mw(lib_qp);
+ if (!rc) {
+ lib_qp->sq.phantom_wqe_cnt++;
+ dev_dbg(&lib_qp->sq.hwq.pdev->dev,
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+ lib_qp->id, lib_qp->sq.hwq.prod,
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+ lib_qp->sq.phantom_wqe_cnt);
+ }
+
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+}
+
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_qp *qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
+ struct bnxt_qplib_q *sq;
+ struct bnxt_qplib_qp *lib_qp;
u32 tbl_idx;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
unsigned long flags;
@@ -2661,7 +2911,21 @@
}
cqe = &cq->cql[0];
while (budget) {
- ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
+ lib_qp = NULL;
+ ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
+ if (lib_qp) {
+ sq = &lib_qp->sq;
+ if (sq->send_phantom) {
+ qp = container_of(lib_qp,
+ struct bnxt_re_qp, qplib_qp);
+ if (send_phantom_wqe(qp) == -ENOMEM)
+ dev_err(rdev_to_dev(cq->rdev),
+ "Phantom failed! Scheduled to send again\n");
+ else
+ sq->send_phantom = false;
+ }
+ }
+
if (!ncqe)
break;
@@ -2822,6 +3086,12 @@
struct bnxt_re_dev *rdev = mr->rdev;
int rc;
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ return rc;
+ }
+
if (mr->npages && mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl);
@@ -2829,8 +3099,6 @@
mr->npages = 0;
mr->pages = NULL;
}
- rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-
if (!IS_ERR_OR_NULL(mr->ib_umem))
ib_umem_release(mr->ib_umem);
@@ -2914,97 +3182,52 @@
return ERR_PTR(rc);
}
-/* Fast Memory Regions */
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_re_fmr *fmr;
+ struct bnxt_re_mw *mw;
int rc;
- if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
- fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
- dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
return ERR_PTR(-ENOMEM);
- }
- fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
+ mw->rdev = rdev;
+ mw->qplib_mw.pd = &pd->qplib_pd;
- fmr->rdev = rdev;
- fmr->qplib_fmr.pd = &pd->qplib_pd;
- fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
-
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
- if (rc)
+ mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
goto fail;
+ }
+ mw->ib_mw.rkey = mw->qplib_mw.rkey;
- fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
- fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
- fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
+ atomic_inc(&rdev->mw_count);
+ return &mw->ib_mw;
- atomic_inc(&rdev->mr_count);
- return &fmr->ib_fmr;
fail:
- kfree(fmr);
+ kfree(mw);
return ERR_PTR(rc);
}
-int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
- u64 iova)
+int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
- struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
- ib_fmr);
- struct bnxt_re_dev *rdev = fmr->rdev;
+ struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
+ struct bnxt_re_dev *rdev = mw->rdev;
int rc;
- fmr->qplib_fmr.va = iova;
- fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
-
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
- list_len, true);
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
- fmr->ib_fmr.lkey);
- return rc;
-}
-
-int bnxt_re_unmap_fmr(struct list_head *fmr_list)
-{
- struct bnxt_re_dev *rdev;
- struct bnxt_re_fmr *fmr;
- struct ib_fmr *ib_fmr;
- int rc = 0;
-
- /* Validate each FMRs inside the fmr_list */
- list_for_each_entry(ib_fmr, fmr_list, list) {
- fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
- rdev = fmr->rdev;
-
- if (rdev) {
- rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
- &fmr->qplib_fmr, true);
- if (rc)
- break;
- }
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+ return rc;
}
- return rc;
-}
-int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
-{
- struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
- ib_fmr);
- struct bnxt_re_dev *rdev = fmr->rdev;
- int rc;
-
- rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to free FMR");
-
- kfree(fmr);
- atomic_dec(&rdev->mr_count);
+ kfree(mw);
+ atomic_dec(&rdev->mw_count);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c3d717..6c160f6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -44,11 +44,23 @@
u32 refcnt;
};
+#define BNXT_RE_FENCE_BYTES 64
+struct bnxt_re_fence_data {
+ u32 size;
+ u8 va[BNXT_RE_FENCE_BYTES];
+ dma_addr_t dma_addr;
+ struct bnxt_re_mr *mr;
+ struct ib_mw *mw;
+ struct bnxt_qplib_swqe bind_wqe;
+ u32 bind_rkey;
+};
+
struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_qplib_dpi dpi;
+ struct bnxt_re_fence_data fence;
};
struct bnxt_re_ah {
@@ -62,6 +74,7 @@
struct bnxt_re_dev *rdev;
struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */
+ spinlock_t rq_lock; /* protect rq */
struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem;
struct ib_umem *rumem;
@@ -181,12 +194,9 @@
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
- u64 iova);
-int bnxt_re_unmap_fmr(struct list_head *fmr_list);
-int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 5d35540..1fce5e7 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -507,10 +507,6 @@
ibdev->dereg_mr = bnxt_re_dereg_mr;
ibdev->alloc_mr = bnxt_re_alloc_mr;
ibdev->map_mr_sg = bnxt_re_map_mr_sg;
- ibdev->alloc_fmr = bnxt_re_alloc_fmr;
- ibdev->map_phys_fmr = bnxt_re_map_phys_fmr;
- ibdev->unmap_fmr = bnxt_re_unmap_fmr;
- ibdev->dealloc_fmr = bnxt_re_dealloc_fmr;
ibdev->reg_user_mr = bnxt_re_reg_user_mr;
ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 43d08b5..f05500bc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -284,7 +284,7 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_qp1 req;
- struct creq_create_qp1_resp *resp;
+ struct creq_create_qp1_resp resp;
struct bnxt_qplib_pbl *pbl;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
@@ -394,31 +394,12 @@
req.pd_id = cpu_to_le32(qp->pd->id);
- resp = (struct creq_create_qp1_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
goto fail;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
- goto fail;
- }
- qp->id = le32_to_cpu(resp->xid);
+
+ qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false;
rq->flush_in_progress = false;
@@ -442,7 +423,7 @@
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
struct cmdq_create_qp req;
- struct creq_create_qp_resp *resp;
+ struct creq_create_qp_resp resp;
struct bnxt_qplib_pbl *pbl;
struct sq_psn_search **psn_search_ptr;
unsigned long int psn_search, poff = 0;
@@ -627,31 +608,12 @@
}
req.pd_id = cpu_to_le32(qp->pd->id);
- resp = (struct creq_create_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
goto fail;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
- goto fail;
- }
- qp->id = le32_to_cpu(resp->xid);
+
+ qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false;
rq->flush_in_progress = false;
@@ -769,10 +731,11 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req;
- struct creq_modify_qp_resp *resp;
+ struct creq_modify_qp_resp resp;
u16 cmd_flags = 0, pkey;
u32 temp32[4];
u32 bmask;
+ int rc;
RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
@@ -862,27 +825,10 @@
req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
- resp = (struct creq_modify_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
qp->cur_qp_state = qp->state;
return 0;
}
@@ -891,37 +837,26 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_query_qp req;
- struct creq_query_qp_resp *resp;
+ struct creq_query_qp_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
struct creq_query_qp_resp_sb *sb;
u16 cmd_flags = 0;
u32 temp32[4];
- int i;
+ int i, rc = 0;
RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf)
+ return -ENOMEM;
+ sb = sbuf->sb;
+
req.qp_cid = cpu_to_le32(qp->id);
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- resp = (struct creq_query_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void **)&sb, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
/* Extract the context from the side buffer */
qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@@ -976,7 +911,9 @@
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
- return 0;
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
}
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@@ -1021,34 +958,18 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req;
- struct creq_destroy_qp_resp *resp;
+ struct creq_destroy_qp_resp resp;
unsigned long flags;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
req.qp_cid = cpu_to_le32(qp->id);
- resp = (struct creq_destroy_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
/* Must walk the associated CQs to nullified the QP ptr */
spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1162,8 +1083,12 @@
rc = -EINVAL;
goto done;
}
- if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
- HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
+
+ if (bnxt_qplib_queue_full(sq)) {
+ dev_err(&sq->hwq.pdev->dev,
+ "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+ sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
+ sq->q_full_delta);
rc = -ENOMEM;
goto done;
}
@@ -1373,6 +1298,9 @@
}
sq->hwq.prod++;
+
+ qp->wqe_cnt++;
+
done:
return rc;
}
@@ -1411,8 +1339,7 @@
rc = -EINVAL;
goto done;
}
- if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
- HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
+ if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
rc = -EINVAL;
@@ -1483,7 +1410,7 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_cq req;
- struct creq_create_cq_resp *resp;
+ struct creq_create_cq_resp resp;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc;
@@ -1525,30 +1452,12 @@
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);
- resp = (struct creq_create_cq_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
- rc = -ETIMEDOUT;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
- goto fail;
- }
- cq->id = le32_to_cpu(resp->xid);
+
+ cq->id = le32_to_cpu(resp.xid);
cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);
@@ -1566,33 +1475,17 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_cq req;
- struct creq_destroy_cq_resp *resp;
+ struct creq_destroy_cq_resp resp;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
req.cq_cid = cpu_to_le32(cq->id);
- resp = (struct creq_destroy_cq_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
return 0;
}
@@ -1664,14 +1557,113 @@
return rc;
}
+/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
+ * CQEs are tracked from sw_cq_cons to max_elements, but are valid only
+ * if VALID == 1.
+ */
+static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+ u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_swq *swq;
+ u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+ struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+ struct cq_req *peek_req_hwcqe;
+ struct bnxt_qplib_qp *peek_qp;
+ struct bnxt_qplib_q *peek_sq;
+ int i, rc = 0;
+
+ /* Normal mode */
+ /* Check for the psn_search marking before completing */
+ swq = &sq->swq[sw_sq_cons];
+ if (swq->psn_search &&
+ le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+ /* Unmark */
+ swq->psn_search->flags_next_psn = cpu_to_le32
+ (le32_to_cpu(swq->psn_search->flags_next_psn)
+ & ~0x80000000);
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+ cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+ sq->condition = true;
+ sq->send_phantom = true;
+
+ /* TODO: Only ARM if the previous SQE is ARMALL */
+ bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
+
+ rc = -EAGAIN;
+ goto out;
+ }
+ if (sq->condition) {
+ /* Peek at the completions */
+ peek_raw_cq_cons = cq->hwq.cons;
+ peek_sw_cq_cons = cq_cons;
+ i = cq->hwq.max_elements;
+ while (i--) {
+ peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
+ peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+ peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
+ [CQE_IDX(peek_sw_cq_cons)];
+ /* If the next hwcqe is VALID */
+ if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
+ cq->hwq.max_elements)) {
+ /* If the next hwcqe is a REQ */
+ if ((peek_hwcqe->cqe_type_toggle &
+ CQ_BASE_CQE_TYPE_MASK) ==
+ CQ_BASE_CQE_TYPE_REQ) {
+ peek_req_hwcqe = (struct cq_req *)
+ peek_hwcqe;
+ peek_qp = (struct bnxt_qplib_qp *)
+ ((unsigned long)
+ le64_to_cpu
+ (peek_req_hwcqe->qp_handle));
+ peek_sq = &peek_qp->sq;
+				peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
+					peek_req_hwcqe->sq_cons_idx) - 1,
+					&sq->hwq);
+ /* If the hwcqe's sq's wr_id matches */
+ if (peek_sq == sq &&
+ sq->swq[peek_sq_cons_idx].wr_id ==
+ BNXT_QPLIB_FENCE_WRID) {
+ /*
+ * Unbreak only if the phantom
+ * comes back
+ */
+ dev_dbg(&cq->hwq.pdev->dev,
+						"FP: Got Phantom CQE");
+ sq->condition = false;
+ sq->single = true;
+ rc = 0;
+ goto out;
+ }
+ }
+ /* Valid but not the phantom, so keep looping */
+ } else {
+ /* Not valid yet, just exit and wait */
+ rc = -EINVAL;
+ goto out;
+ }
+ peek_sw_cq_cons++;
+ peek_raw_cq_cons++;
+ }
+ dev_err(&cq->hwq.pdev->dev,
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+ cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+ rc = -EINVAL;
+ }
+out:
+ return rc;
+}
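do_wa9060() keys off a software-only marker in bit 31 of the SQE's psn_search->flags_next_psn: when the marked SQE is reached the marker is cleared, condition/send_phantom are set and the CQ is re-armed; the peek loop then waits for the phantom bind WQE (recognised by BNXT_QPLIB_FENCE_WRID) before normal processing resumes. A minimal sketch of just the marker handling, with illustrative helper and macro names that are not part of the driver:

/* Sketch only; bit 31 is the marker the code above tests and clears. */
#define QPLIB_PSN_SEARCH_MARKER	0x80000000

static bool psn_search_marked(struct sq_psn_search *psn)
{
	return le32_to_cpu(psn->flags_next_psn) & QPLIB_PSN_SEARCH_MARKER;
}

static void psn_search_unmark(struct sq_psn_search *psn)
{
	psn->flags_next_psn = cpu_to_le32(le32_to_cpu(psn->flags_next_psn) &
					  ~QPLIB_PSN_SEARCH_MARKER);
}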
+
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
- struct bnxt_qplib_cqe **pcqe, int *budget)
+ struct bnxt_qplib_cqe **pcqe, int *budget,
+ u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
struct bnxt_qplib_cqe *cqe;
- u32 sw_cons, cqe_cons;
+ u32 sw_sq_cons, cqe_sq_cons;
+ struct bnxt_qplib_swq *swq;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1683,13 +1675,13 @@
}
sq = &qp->sq;
- cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
- if (cqe_cons > sq->hwq.max_elements) {
+ cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
+ if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process req reported ");
dev_err(&cq->hwq.pdev->dev,
"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
- cqe_cons, sq->hwq.max_elements);
+ cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
/* If we were in the middle of flushing the SQ, continue */
@@ -1698,53 +1690,74 @@
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
- * to the cqe_cons
+ * to the cqe_sq_cons
*/
cqe = *pcqe;
while (*budget) {
- sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
- if (sw_cons == cqe_cons)
+ sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
+ if (sw_sq_cons == cqe_sq_cons)
+ /* Done */
break;
+
+ swq = &sq->swq[sw_sq_cons];
memset(cqe, 0, sizeof(*cqe));
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe->qp_handle = (u64)(unsigned long)qp;
cqe->src_qp = qp->id;
- cqe->wr_id = sq->swq[sw_cons].wr_id;
- cqe->type = sq->swq[sw_cons].type;
+ cqe->wr_id = swq->wr_id;
+ if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+ goto skip;
+ cqe->type = swq->type;
/* For the last CQE, check for status. For errors, regardless
* of the request being signaled or not, it must complete with
* the hwcqe error status
*/
- if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
+ if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Processed Req ");
dev_err(&cq->hwq.pdev->dev,
"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
- sw_cons, cqe->wr_id, cqe->status);
+ sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
sq->flush_in_progress = true;
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ sq->condition = false;
+ sq->single = false;
} else {
- if (sq->swq[sw_cons].flags &
- SQ_SEND_FLAGS_SIGNAL_COMP) {
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ /* Before we complete, do WA 9060 */
+ if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
cqe->status = CQ_REQ_STATUS_OK;
cqe++;
(*budget)--;
}
}
+skip:
sq->hwq.cons++;
+ if (sq->single)
+ break;
}
+out:
*pcqe = cqe;
- if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
+ if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
/* Out of budget */
rc = -EAGAIN;
goto done;
}
+ /*
+	 * Return to normal completion mode only after all of the WCs for
+	 * this CQE have been generated.
+ */
+ sq->single = false;
if (!sq->flush_in_progress)
goto done;
flush:
@@ -2074,7 +2087,7 @@
}
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
- int num_cqes)
+ int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
unsigned long flags;
@@ -2099,7 +2112,8 @@
case CQ_BASE_CQE_TYPE_REQ:
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
- &cqe, &budget);
+ &cqe, &budget,
+ sw_cons, lib_qp);
break;
case CQ_BASE_CQE_TYPE_RES_RC:
rc = bnxt_qplib_cq_process_res_rc(cq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index f0150f8..36b7b7d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -88,6 +88,7 @@
struct bnxt_qplib_swqe {
/* General */
+#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
u64 wr_id;
u8 reqs_type;
u8 type;
@@ -216,9 +217,16 @@
struct scatterlist *sglist;
u32 nmap;
u32 max_wqe;
+ u16 q_full_delta;
u16 max_sge;
u32 psn;
bool flush_in_progress;
+ bool condition;
+ bool single;
+ bool send_phantom;
+ u32 phantom_wqe_cnt;
+ u32 phantom_cqe_cnt;
+ u32 next_cq_cons;
};
struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
+ u64 wqe_cnt;
u32 min_rnr_timer;
u32 max_rd_atomic;
u32 max_dest_rd_atomic;
@@ -301,6 +310,13 @@
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
!((raw_cons) & (cp_bit)))
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+{
+ return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
+ &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
+ &qplib_q->hwq);
+}
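bnxt_qplib_queue_full() deliberately reports "full" q_full_delta slots early; that is how the WQEs reserved in ib_verbs.c stay free. Worked numbers, chosen purely for illustration:

	/* max_elements = 256, q_full_delta = 128, prod = 300, cons = 172:
	 *   HWQ_CMP(prod + delta) = (300 + 128) & 255 = 172
	 *   HWQ_CMP(cons)         = 172
	 * -> full is reported while 128 of the 256 slots are still unused,
	 *    i.e. exactly the reserved headroom.
	 */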
+
struct bnxt_qplib_cqe {
u8 status;
u8 type;
@@ -432,7 +448,7 @@
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
- int num);
+ int num, struct bnxt_qplib_qp **qp);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 23fb726..16e4275 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -39,72 +39,55 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
+#include <linux/delay.h>
+
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
u16 cbit;
int rc;
- cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
- if (!test_bit(cbit, rcfw->cmdq_bitmap))
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: CMD bit %d for cookie 0x%x is not set?",
- cbit, cookie);
-
rc = wait_event_timeout(rcfw->waitq,
!test_bit(cbit, rcfw->cmdq_bitmap),
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
- if (!rc) {
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
- RCFW_CMD_WAIT_TIME_MS, cookie);
- }
-
- return rc;
+ return rc ? 0 : -ETIMEDOUT;
};
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
- u32 count = -1;
+ u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
u16 cbit;
- cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_bit(cbit, rcfw->cmdq_bitmap))
goto done;
do {
+		mdelay(1); /* 1 msec */
bnxt_qplib_service_creq((unsigned long)rcfw);
} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
- return count;
+ return count ? 0 : -ETIMEDOUT;
};
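With the loop count now bounded, the blocking path has a deterministic worst case; from the RCFW_BLOCKED_CMD_WAIT_COUNT definition in the qplib_rcfw.h hunk below:

	0x4E20 iterations = 20000 * mdelay(1) ~= 20 seconds (plus CREQ polling)

while the non-blocking path instead sleeps on the waitqueue for at most RCFW_CMD_WAIT_TIME_MS.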
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
- struct cmdq_base *req, void **crsbe,
- u8 is_block)
+static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+ struct creq_base *resp, void *sb, u8 is_block)
{
- struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
- struct bnxt_qplib_crsqe *crsqe = NULL;
- struct bnxt_qplib_crsbe **crsb_ptr;
+ struct bnxt_qplib_crsq *crsqe;
u32 sw_prod, cmdq_prod;
- u8 retry_cnt = 0xFF;
- dma_addr_t dma_addr;
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
int pg, idx;
u8 *preq;
-retry:
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
- return NULL;
+ return -EINVAL;
}
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
- return NULL;
+ return -EINVAL;
}
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
spin_lock_irqsave(&cmdq->lock, flags);
- if (req->cmd_size > cmdq->max_elements -
- ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
- (cmdq->max_elements - 1))) {
+ if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
spin_unlock_irqrestore(&cmdq->lock, flags);
-
- if (!retry_cnt--)
- return NULL;
- goto retry;
+ return -EAGAIN;
}
- retry_cnt = 0xFF;
- cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
+ cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (is_block)
cookie |= RCFW_CMD_IS_BLOCKING;
+
+ set_bit(cbit, rcfw->cmdq_bitmap);
req->cookie = cpu_to_le16(cookie);
- if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW MAX outstanding cmd reached!");
- atomic_dec(&rcfw->seq_num);
+ crsqe = &rcfw->crsqe_tbl[cbit];
+ if (crsqe->resp) {
spin_unlock_irqrestore(&cmdq->lock, flags);
-
- if (!retry_cnt--)
- return NULL;
- goto retry;
+ return -EBUSY;
}
- /* Reserve a resp buffer slot if requested */
- if (req->resp_size && crsbe) {
- spin_lock(&crsb->lock);
- sw_prod = HWQ_CMP(crsb->prod, crsb);
- crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
- *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
- [get_crsb_idx(sw_prod)];
- bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
- req->resp_addr = cpu_to_le64(dma_addr);
- crsb->prod++;
- spin_unlock(&crsb->lock);
+ memset(resp, 0, sizeof(*resp));
+ crsqe->resp = (struct creq_qp_event *)resp;
+ crsqe->resp->cookie = req->cookie;
+ crsqe->req_size = req->cmd_size;
+ if (req->resp_size && sb) {
+ struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
- req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
- BNXT_QPLIB_CMDQE_UNITS - 1) /
- BNXT_QPLIB_CMDQE_UNITS;
+ req->resp_addr = cpu_to_le64(sbuf->dma_addr);
+ req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
}
+
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req;
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@
preq += min_t(u32, size, sizeof(*cmdqe));
size -= min_t(u32, size, sizeof(*cmdqe));
cmdq->prod++;
+ rcfw->seq_num++;
} while (size > 0);
+ rcfw->seq_num++;
+
cmdq_prod = cmdq->prod;
if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
- /* The very first doorbell write is required to set this flag
- * which prompts the FW to reset its internal pointers
+ /* The very first doorbell write
+ * is required to set this flag
+ * which prompts the FW to reset
+ * its internal pointers
*/
cmdq_prod |= FIRMWARE_FIRST_FLAG;
rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
}
- sw_prod = HWQ_CMP(crsq->prod, crsq);
- crsqe = &crsq->crsq[sw_prod];
- memset(crsqe, 0, sizeof(*crsqe));
- crsq->prod++;
- crsqe->req_size = req->cmd_size;
/* ring CMDQ DB */
+ wmb();
writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
rcfw->cmdq_bar_reg_prod_off);
writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@@ -214,9 +185,56 @@
done:
spin_unlock_irqrestore(&cmdq->lock, flags);
-	/* Return the CREQ response pointer */
-	return crsqe ? &crsqe->qp_event : NULL;
+	return 0;
}
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct cmdq_base *req,
+ struct creq_base *resp,
+ void *sb, u8 is_block)
+{
+ struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
+ u16 cookie;
+ u8 opcode, retry_cnt = 0xFF;
+ int rc = 0;
+
+ do {
+ opcode = req->opcode;
+ rc = __send_message(rcfw, req, resp, sb, is_block);
+ cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
+ if (!rc)
+ break;
+
+ if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
+ /* send failed */
+ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+ cookie, opcode);
+ return rc;
+ }
+ is_block ? mdelay(1) : usleep_range(500, 1000);
+
+ } while (retry_cnt--);
+
+ if (is_block)
+ rc = __block_for_resp(rcfw, cookie);
+ else
+ rc = __wait_for_resp(rcfw, cookie);
+ if (rc) {
+ /* timed out */
+		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d msec)",
+ cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+ return rc;
+ }
+
+ if (evnt->status) {
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+ cookie, opcode, evnt->status);
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
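bnxt_qplib_rcfw_send_message() now folds the old send / wait-for-response / status-check triplet into a single call: the response lands in a caller-provided on-stack struct, a DMA side buffer is passed only for query-style commands, and is_block selects the polling wait (the AH conversions below pass 1). A hedged sketch of the query-style shape, mirroring the converted call sites in this patch (declaration of req and the failure path after send elided):

	struct creq_query_func_resp resp;
	struct creq_query_func_resp_sb *sb;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	int rc;

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, (void *)sbuf, 0);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

Commands without a side buffer simply pass NULL for sbuf, as the DESTROY_QP and DELETE_GID conversions in this patch do.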
/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_func_event *func_event)
@@ -260,12 +278,12 @@
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
- struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_crsqe *crsqe;
- u16 cbit, cookie, blocked = 0;
+ struct bnxt_qplib_crsq *crsqe;
unsigned long flags;
- u32 sw_cons;
+ u16 cbit, blocked = 0;
+ u16 cookie;
+ __le16 mcookie;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@@ -275,24 +293,31 @@
default:
/* Command Response */
spin_lock_irqsave(&cmdq->lock, flags);
- sw_cons = HWQ_CMP(crsq->cons, crsq);
- crsqe = &crsq->crsq[sw_cons];
- crsq->cons++;
- memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
-
- cookie = le16_to_cpu(crsqe->qp_event.cookie);
+ cookie = le16_to_cpu(qp_event->cookie);
+ mcookie = qp_event->cookie;
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
+ crsqe = &rcfw->crsqe_tbl[cbit];
+ if (crsqe->resp &&
+ crsqe->resp->cookie == mcookie) {
+ memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+ crsqe->resp = NULL;
+ } else {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
+ crsqe->resp ? "mismatch" : "collision",
+ crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+ }
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
"QPLIB: CMD bit %d was not requested", cbit);
-
cmdq->cons += crsqe->req_size;
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ crsqe->req_size = 0;
+
if (!blocked)
wake_up(&rcfw->waitq);
- break;
+ spin_unlock_irqrestore(&cmdq->lock, flags);
}
return 0;
}
@@ -305,12 +330,12 @@
struct creq_base *creqe, **creq_ptr;
u32 sw_cons, raw_cons;
unsigned long flags;
- u32 type;
+ u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
- /* Service the CREQ until empty */
+	/* Service the CREQ until the budget is exhausted */
spin_lock_irqsave(&creq->lock, flags);
raw_cons = creq->cons;
- while (1) {
+ while (budget > 0) {
sw_cons = HWQ_CMP(raw_cons, creq);
creq_ptr = (struct creq_base **)creq->pbl_ptr;
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@@ -320,15 +345,9 @@
type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) {
case CREQ_BASE_TYPE_QP_EVENT:
- if (!bnxt_qplib_process_qp_event
- (rcfw, (struct creq_qp_event *)creqe))
- rcfw->creq_qp_event_processed++;
- else {
- dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: type = 0x%x not handled",
- type);
- }
+ bnxt_qplib_process_qp_event
+ (rcfw, (struct creq_qp_event *)creqe);
+ rcfw->creq_qp_event_processed++;
break;
case CREQ_BASE_TYPE_FUNC_EVENT:
if (!bnxt_qplib_process_func_event
@@ -346,7 +365,9 @@
break;
}
raw_cons++;
+ budget--;
}
+
if (creq->cons != raw_cons) {
creq->cons = raw_cons;
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@@ -375,23 +396,16 @@
/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
- struct creq_deinitialize_fw_resp *resp;
struct cmdq_deinitialize_fw req;
+ struct creq_deinitialize_fw_resp resp;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
- resp = (struct creq_deinitialize_fw_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp)
- return -EINVAL;
-
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
- return -ETIMEDOUT;
-
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
- return -EFAULT;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
+ if (rc)
+ return rc;
clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0;
@@ -417,9 +431,10 @@
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
- struct creq_initialize_fw_resp *resp;
struct cmdq_initialize_fw req;
+ struct creq_initialize_fw_resp resp;
u16 cmd_flags = 0, level;
+ int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
@@ -482,37 +497,19 @@
skip_ctx_setup:
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
- resp = (struct creq_initialize_fw_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW: INITIALIZE_FW send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW: INITIALIZE_FW timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW: INITIALIZE_FW failed");
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
+ if (rc)
+ return rc;
set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0;
}
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
- kfree(rcfw->crsq.crsq);
+ kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
-
rcfw->pdev = NULL;
}
@@ -539,21 +536,11 @@
goto fail;
}
- rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
- rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
- sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
- if (!rcfw->crsq.crsq)
+ rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
+ sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+ if (!rcfw->crsqe_tbl)
goto fail;
- rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
- if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
- &rcfw->crsb.max_elements,
- BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
- HWQ_TYPE_CTX)) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CRSB allocation failed");
- goto fail;
- }
return 0;
fail:
@@ -606,7 +593,7 @@
int rc;
/* General */
- atomic_set(&rcfw->seq_num, 0);
+ rcfw->seq_num = 0;
rcfw->flags = FIRMWARE_FIRST_FLAG;
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
sizeof(unsigned long));
@@ -636,10 +623,6 @@
rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
- /* CRSQ */
- rcfw->crsq.prod = 0;
- rcfw->crsq.cons = 0;
-
/* CREQ */
rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@@ -692,3 +675,34 @@
__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
return 0;
}
+
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+ struct bnxt_qplib_rcfw *rcfw,
+ u32 size)
+{
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+
+ sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
+ if (!sbuf)
+ return NULL;
+
+ sbuf->size = size;
+ sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
+ &sbuf->dma_addr, GFP_ATOMIC);
+ if (!sbuf->sb)
+ goto bail;
+
+ return sbuf;
+bail:
+ kfree(sbuf);
+ return NULL;
+}
+
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_rcfw_sbuf *sbuf)
+{
+ if (sbuf->sb)
+ dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
+ sbuf->sb, sbuf->dma_addr);
+ kfree(sbuf);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index d3567d7..09ce121 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -73,6 +73,7 @@
#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
+#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
/* Cmdq contains a fix number of a 16-Byte slots */
struct bnxt_qplib_cmdqe {
@@ -94,32 +95,6 @@
u8 data[1024];
};
-/* CRSQ SB */
-#define BNXT_QPLIB_CRSBE_MAX_CNT 4
-#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
-#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
-
-#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
-#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
-
-static inline u32 get_crsb_pg(u32 val)
-{
- return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
-}
-
-static inline u32 get_crsb_idx(u32 val)
-{
- return val & MAX_CRSB_IDX_PER_PG;
-}
-
-static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
- u32 prod, dma_addr_t *dma_addr)
-{
- *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
- *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
- BNXT_QPLIB_CRSBE_UNITS;
-}
-
/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
@@ -158,17 +133,19 @@
#define CREQ_DB(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
+#define CREQ_ENTRY_POLL_BUDGET 0x100
+
/* HWQ */
-struct bnxt_qplib_crsqe {
- struct creq_qp_event qp_event;
+
+struct bnxt_qplib_crsq {
+ struct creq_qp_event *resp;
u32 req_size;
};
-struct bnxt_qplib_crsq {
- struct bnxt_qplib_crsqe *crsq;
- u32 prod;
- u32 cons;
- u32 max_elements;
+struct bnxt_qplib_rcfw_sbuf {
+ void *sb;
+ dma_addr_t dma_addr;
+ u32 size;
};
/* RCFW Communication Channels */
@@ -185,7 +162,7 @@
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
- atomic_t seq_num;
+ u32 seq_num;
/* Bar region info */
void __iomem *cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@
/* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq;
- struct bnxt_qplib_crsq crsq;
- struct bnxt_qplib_hwq crsb;
+ struct bnxt_qplib_crsq *crsqe_tbl;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@
(struct bnxt_qplib_rcfw *,
struct creq_func_event *));
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
- struct cmdq_base *req, void **crsbe,
- u8 is_block);
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+ struct bnxt_qplib_rcfw *rcfw,
+ u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct cmdq_base *req, struct creq_base *resp,
+ void *sbuf, u8 is_block);
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6277d80..2e48555 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,10 @@
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
+#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
+ ((HWQ_CMP(hwq->prod, hwq)\
+ - HWQ_CMP(hwq->cons, hwq))\
+ & (hwq->max_elements - 1)))
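HWQ_FREE_SLOTS() complements HWQ_CMP(): it returns how many ring slots remain between producer and consumer. For example, with max_elements = 256, prod = 300 and cons = 172:

	used = (HWQ_CMP(300) - HWQ_CMP(172)) & 255 = (44 - 172) & 255 = 128
	free = 256 - 128 = 128

and __send_message() in qplib_rcfw.c above rejects a command with -EAGAIN when req->cmd_size >= that free count.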
enum bnxt_qplib_hwq_type {
HWQ_TYPE_CTX,
HWQ_TYPE_QUEUE,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7b31ecc..fde18cf 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -55,37 +55,30 @@
struct bnxt_qplib_dev_attr *attr)
{
struct cmdq_query_func req;
- struct creq_query_func_resp *resp;
+ struct creq_query_func_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
struct creq_query_func_resp_sb *sb;
u16 cmd_flags = 0;
u32 temp;
u8 *tqm_alloc;
- int i;
+ int i, rc = 0;
RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- resp = (struct creq_query_func_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
- 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
+ "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+ return -ENOMEM;
}
+
+ sb = sbuf->sb;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
attr->max_qp_rd_atom =
@@ -95,6 +88,11 @@
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+ /*
+	 * 128 WQEs need to be reserved for the HW (8916), so do not
+	 * report the full maximum.
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
attr->max_qp_sges = sb->max_sge;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -130,7 +128,10 @@
attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
- return 0;
+
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
}
/* SGID */
@@ -178,8 +179,9 @@
/* Remove GID from the SGID table */
if (update) {
struct cmdq_delete_gid req;
- struct creq_delete_gid_resp *resp;
+ struct creq_delete_gid_resp resp;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
if (sgid_tbl->hw_id[index] == 0xFFFF) {
@@ -188,31 +190,10 @@
return -EINVAL;
}
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
- resp = (struct creq_delete_gid_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
- 0);
- if (!resp) {
- dev_err(&res->pdev->dev,
- "QPLIB: SP: DELETE_GID send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev,
- "QPLIB: SP: DELETE_GID timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev,
- "QPLIB: SP: DELETE_GID failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
}
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
@@ -234,7 +215,7 @@
struct bnxt_qplib_res,
sgid_tbl);
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- int i, free_idx, rc = 0;
+ int i, free_idx;
if (!sgid_tbl) {
dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@@ -266,10 +247,11 @@
}
if (update) {
struct cmdq_add_gid req;
- struct creq_add_gid_resp *resp;
+ struct creq_add_gid_resp resp;
u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
+ int rc;
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
@@ -290,31 +272,11 @@
req.src_mac[1] = cpu_to_be16(temp16[1]);
req.src_mac[2] = cpu_to_be16(temp16[2]);
- resp = (struct creq_add_gid_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev,
- "QPLIB: SP: ADD_GID send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev,
- "QPIB: SP: ADD_GID timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
- sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+ sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@@ -325,7 +287,7 @@
*index = free_idx;
/* unlock */
- return rc;
+ return 0;
}
/* pkeys */
@@ -422,10 +384,11 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_ah req;
- struct creq_create_ah_resp *resp;
+ struct creq_create_ah_resp resp;
u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
+ int rc;
RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
@@ -450,28 +413,12 @@
req.dest_mac[1] = cpu_to_le16(temp16[1]);
req.dest_mac[2] = cpu_to_le16(temp16[2]);
- resp = (struct creq_create_ah_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 1);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
- ah->id = le32_to_cpu(resp->xid);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 1);
+ if (rc)
+ return rc;
+
+ ah->id = le32_to_cpu(resp.xid);
return 0;
}
@@ -479,35 +426,19 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_ah req;
- struct creq_destroy_ah_resp *resp;
+ struct creq_destroy_ah_resp resp;
u16 cmd_flags = 0;
+ int rc;
/* Clean up the AH table in the device */
RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
req.ah_cid = cpu_to_le32(ah->id);
- resp = (struct creq_destroy_ah_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 1);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 1);
+ if (rc)
+ return rc;
return 0;
}
@@ -516,8 +447,9 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_deallocate_key req;
- struct creq_deallocate_key_resp *resp;
+ struct creq_deallocate_key_resp resp;
u16 cmd_flags = 0;
+ int rc;
if (mrw->lkey == 0xFFFFFFFF) {
dev_info(&res->pdev->dev,
@@ -536,27 +468,11 @@
else
req.key = cpu_to_le32(mrw->lkey);
- resp = (struct creq_deallocate_key_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
+ if (rc)
+ return rc;
+
/* Free the qplib's MRW memory */
if (mrw->hwq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@@ -568,9 +484,10 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_allocate_mrw req;
- struct creq_allocate_mrw_resp *resp;
+ struct creq_allocate_mrw_resp resp;
u16 cmd_flags = 0;
unsigned long tmp;
+ int rc;
RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
@@ -584,33 +501,17 @@
tmp = (unsigned long)mrw;
req.mrw_handle = cpu_to_le64(tmp);
- resp = (struct creq_allocate_mrw_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
- mrw->rkey = le32_to_cpu(resp->xid);
+ mrw->rkey = le32_to_cpu(resp.xid);
else
- mrw->lkey = le32_to_cpu(resp->xid);
+ mrw->lkey = le32_to_cpu(resp.xid);
return 0;
}
@@ -619,40 +520,17 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_deregister_mr req;
- struct creq_deregister_mr_resp *resp;
+ struct creq_deregister_mr_resp resp;
u16 cmd_flags = 0;
int rc;
RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
req.lkey = cpu_to_le32(mrw->lkey);
- resp = (struct creq_deregister_mr_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, block);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
- return -EINVAL;
- }
- if (block)
- rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- else
- rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- if (!rc) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, block);
+ if (rc)
+ return rc;
/* Free the qplib's MR memory */
if (mrw->hwq.max_elements) {
@@ -669,7 +547,7 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
- struct creq_register_mr_resp *resp;
+ struct creq_register_mr_resp resp;
u16 cmd_flags = 0, level;
int pg_ptrs, pages, i, rc;
dma_addr_t **pbl_ptr;
@@ -730,36 +608,11 @@
req.key = cpu_to_le32(mr->lkey);
req.mr_size = cpu_to_le64(mr->total_size);
- resp = (struct creq_register_mr_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, block);
- if (!resp) {
- dev_err(&res->pdev->dev, "SP: REG_MR send failed");
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, block);
+ if (rc)
goto fail;
- }
- if (block)
- rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- else
- rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- if (!rc) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev, "SP: REG_MR timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
- goto fail;
- }
+
return 0;
fail:
@@ -804,35 +657,15 @@
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_map_tc_to_cos req;
- struct creq_map_tc_to_cos_resp *resp;
+ struct creq_map_tc_to_cos_resp resp;
u16 cmd_flags = 0;
- int tleft;
+ int rc = 0;
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
req.cos0 = cpu_to_le16(cids[0]);
req.cos1 = cpu_to_le16(cids[1]);
- resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
- return -EINVAL;
- }
-
- tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
- if (!tleft) {
- dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
- return -ETIMEDOUT;
- }
-
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
-
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 1442a61..a543f95 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,8 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
+#define BNXT_QPLIB_RESERVED_QP_WRS 128
+
struct bnxt_qplib_dev_attr {
char fw_ver[32];
u16 max_sgid;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b6fe459..0910faf 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -488,6 +488,7 @@
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
+ kfree_skb(skb);
return 0;
}
@@ -498,6 +499,7 @@
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
+ kfree_skb(skb);
return 0;
}
@@ -569,11 +571,13 @@
pr_debug("%s rdev %p\n", __func__, rdev);
req->cmd = CPL_ABORT_NO_RST;
+ skb_get(skb);
ret = c4iw_ofld_send(rdev, skb);
if (ret) {
__state_set(&ep->com, DEAD);
queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
- }
+ } else
+ kfree_skb(skb);
}
static int send_flowc(struct c4iw_ep *ep)
@@ -2517,7 +2521,8 @@
goto reject;
}
- hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct tcphdr) +
((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
child_ep->mtu = peer_mss + hdrs;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 329fb65e..ae0b79a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -767,7 +767,7 @@
kfree(entry);
}
- list_for_each_safe(pos, nxt, &uctx->qpids) {
+ list_for_each_safe(pos, nxt, &uctx->cqids) {
entry = list_entry(pos, struct c4iw_qid_list, entry);
list_del_init(&entry->entry);
kfree(entry);
@@ -880,13 +880,15 @@
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
err = -ENOMEM;
- goto err_free_status_page;
+ goto err_free_status_page_and_wr_log;
}
rdev->status_page->db_off = 0;
return 0;
-err_free_status_page:
+err_free_status_page_and_wr_log:
+ if (c4iw_wr_log && rdev->wr_log)
+ kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@
{
destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
+ c4iw_release_dev_ucontext(rdev, &rdev->uctx);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
+ c4iw_ocqp_pool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
@@ -971,7 +975,7 @@
devp->rdev.lldi.sge_egrstatuspagesize);
devp->rdev.hw_queue.t4_eq_status_entries =
- devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+ devp->rdev.lldi.sge_egrstatuspagesize / 64;
devp->rdev.hw_queue.t4_max_eq_size = 65520;
devp->rdev.hw_queue.t4_max_iq_size = 65520;
devp->rdev.hw_queue.t4_max_rq_size = 8192 -
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 5d6b1ee..2ba00b8 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6312,25 +6312,38 @@
}
}
-static void write_global_credit(struct hfi1_devdata *dd,
- u8 vau, u16 total, u16 shared)
+/*
+ * Set up allocation unit value.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
{
- write_csr(dd, SEND_CM_GLOBAL_CREDIT,
- ((u64)total <<
- SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
- ((u64)shared <<
- SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
- ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+ u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+ /* do not modify other values in the register */
+ reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+ reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/*
* Set up initial VL15 credits of the remote. Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset .
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
*/
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
{
- /* leave shared count at zero for both global and VL15 */
- write_global_credit(dd, vau, vl15buf, 0);
+ u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+ /* set initial values for total and shared credit limit */
+ reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+ SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+ /*
+ * Set total limit to be equal to VL15 credits.
+ * Leave shared limit at 0.
+ */
+ reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@
for (i = 0; i < TXE_NUM_DATA_VL; i++)
write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
write_csr(dd, SEND_CM_CREDIT_VL15, 0);
- write_global_credit(dd, 0, 0, 0);
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
/* reset the CM block */
pio_send_control(dd, PSC_CM_RESET);
+ /* reset cached value */
+ dd->vl15buf_cached = 0;
}
/* convert a vCU to a CU */
@@ -6839,24 +6854,35 @@
{
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
link_up_work);
+ struct hfi1_devdata *dd = ppd->dd;
+
set_link_state(ppd, HLS_UP_INIT);
/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
- read_ltp_rtt(ppd->dd);
+ read_ltp_rtt(dd);
/*
* OPA specifies that certain counters are cleared on a transition
* to link up, so do that.
*/
- clear_linkup_counters(ppd->dd);
+ clear_linkup_counters(dd);
/*
* And (re)set link up default values.
*/
set_linkup_defaults(ppd);
+ /*
+ * Set VL15 credits. Use cached value from verify cap interrupt.
+ * In case of quick linkup or simulator, vl15 value will be set by
+ * handle_linkup_change. VerifyCap interrupt handler will not be
+ * called in those scenarios.
+ */
+ if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+ set_up_vl15(dd, dd->vl15buf_cached);
+
/* enforce link speed enabled */
if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
/* oops - current speed is not enabled, bounce */
- dd_dev_err(ppd->dd,
+ dd_dev_err(dd,
"Link speed active 0x%x is outside enabled 0x%x, downing link\n",
ppd->link_speed_active, ppd->link_speed_enabled);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@
*/
if (vau == 0)
vau = 1;
- set_up_vl15(dd, vau, vl15buf);
+ set_up_vau(dd, vau);
+
+ /*
+ * Set VL15 credits to 0 in global credit register. Cache remote VL15
+ * credits value and wait for link-up interrupt to set it.
+ */
+ set_up_vl15(dd, 0);
+ dd->vl15buf_cached = vl15buf;
/* set up the LCB CRC mode */
crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 5bfa839..793514f 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -839,7 +839,9 @@
#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index da322e6..414a04a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1045,6 +1045,14 @@
/* initial vl15 credits to use */
u16 vl15_init;
+ /*
+ * Cached value for vl15buf, read during verify cap interrupt. VL15
+ * credits are to be kept at 0 and set when handling the link-up
+ * interrupt. This removes the possibility of receiving VL15 MAD
+ * packets before this HFI is ready.
+ */
+ u16 vl15buf_cached;
+
/* Misc small ints */
u8 n_krcv_queues;
u8 qos_shift;
@@ -1598,7 +1606,8 @@
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index ba265d0..04a5082d 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -130,7 +130,8 @@
* the remote values. Both sides must be using the values.
*/
if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- set_up_vl15(dd, dd->vau, dd->vl15_init);
+ set_up_vau(dd, dd->vau);
+ set_up_vl15(dd, dd->vl15_init);
assign_remote_cm_au_table(dd, dd->vcu);
}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 93faf86..6a9f6f9 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -207,8 +207,8 @@
/*
* Save BARs and command to rewrite after device reset.
*/
- dd->pcibar0 = addr;
- dd->pcibar1 = addr >> 32;
+ pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
+ pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 069bdaf..1080778 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2159,8 +2159,11 @@
ret = hfi1_rvt_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
+ /* peer will send again */
+ rvt_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 50d140d..2f3bbca 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -196,7 +196,8 @@
};
static struct attribute *port_cc_default_attributes[] = {
- &cc_prescan_attr.attr
+ &cc_prescan_attr.attr,
+ NULL
};
static struct kobj_type port_cc_ktype = {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index f3bc01b..6ae98aa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -784,7 +784,6 @@
}
ctrl_ird |= IETF_PEER_TO_PEER;
- ctrl_ird |= IETF_FLPDU_ZERO_LEN;
switch (mpa_key) {
case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@
} else {
type = I40IW_CM_EVENT_CONNECTED;
cm_node->state = I40IW_CM_STATE_OFFLOADED;
- i40iw_send_ack(cm_node);
}
+ i40iw_send_ack(cm_node);
break;
default:
pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index f82483b..a027e20 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -285,28 +285,20 @@
struct i40iw_sc_dev *dev = vsi->dev;
struct i40iw_sc_qp *qp = NULL;
bool qs_handle_change = false;
- bool mss_change = false;
unsigned long flags;
u16 qs_handle;
int i;
- if (vsi->mss != l2params->mss) {
- mss_change = true;
- vsi->mss = l2params->mss;
- }
+ vsi->mss = l2params->mss;
i40iw_fill_qos_list(l2params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
qs_handle = l2params->qs_handle_list[i];
if (vsi->qos[i].qs_handle != qs_handle)
qs_handle_change = true;
- else if (!mss_change)
- continue; /* no MSS nor qs handle change */
spin_lock_irqsave(&vsi->qos[i].lock, flags);
qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
while (qp) {
- if (mss_change)
- i40iw_qp_mss_modify(dev, qp);
if (qs_handle_change) {
qp->qs_handle = qs_handle;
/* issue cqp suspend command */
@@ -2395,7 +2387,6 @@
set_64bit_val(wqe,
8,
- LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 2728af3..a3f18a2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1319,13 +1319,13 @@
status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
if (status)
- goto exit;
+ goto error;
info.fpm_query_buf_pa = mem.pa;
info.fpm_query_buf = mem.va;
status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
if (status)
- goto exit;
+ goto error;
info.fpm_commit_buf_pa = mem.pa;
info.fpm_commit_buf = mem.va;
info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@
info.exception_lan_queue = 1;
info.vchnl_send = i40iw_virtchnl_send;
status = i40iw_device_init(&iwdev->sc_dev, &info);
-exit:
- if (status) {
- kfree(iwdev->hmc_info_mem);
- iwdev->hmc_info_mem = NULL;
- }
+
+ if (status)
+ goto error;
memset(&vsi_info, 0, sizeof(vsi_info));
vsi_info.dev = &iwdev->sc_dev;
vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@
memset(&stats_info, 0, sizeof(stats_info));
stats_info.fcn_id = ldev->fid;
stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+ if (!stats_info.pestat) {
+ status = I40IW_ERR_NO_MEMORY;
+ goto error;
+ }
stats_info.stats_initialize = true;
if (stats_info.pestat)
i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
}
return status;
+error:
+ kfree(iwdev->hmc_info_mem);
+ iwdev->hmc_info_mem = NULL;
+ return status;
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index aa66c1c..f27be3e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -199,7 +199,6 @@
struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 7b76259..959ec81 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -541,7 +541,6 @@
struct i40iw_modify_qp_info {
u64 rx_win0;
u64 rx_win1;
- u16 new_mss;
u8 next_iwarp_state;
u8 termlen;
bool ord_valid;
@@ -554,7 +553,6 @@
bool dont_send_term;
bool dont_send_fin;
bool cached_var_valid;
- bool mss_change;
bool force_loopback;
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 409a378..56d9869 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -757,23 +757,6 @@
}
/**
- * i40iw_qp_mss_modify - modify mss for qp
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
- struct i40iw_modify_qp_info info;
-
- memset(&info, 0, sizeof(info));
- info.mss_change = true;
- info.new_mss = qp->vsi->mss;
- i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
-}
-
-/**
* i40iw_term_modify_qp - modify qp for term message
* @qp: hardware control qp
* @next_state: qp's next state
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index f4d1368..48fd327 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -443,10 +443,7 @@
if (!dev->vchnl_up)
return I40IW_ERR_NOT_READY;
if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
- if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
- vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
- else
- vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+ vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
return I40IW_SUCCESS;
}
for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index b469471..21d31cb 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1578,6 +1578,7 @@
if (port < 0)
return;
ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+ ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d45772d..9ecc089 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2979,6 +2979,18 @@
return ret;
}
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+ switch (umr_fence_cap) {
+ case MLX5_CAP_UMR_FENCE_NONE:
+ return MLX5_FENCE_MODE_NONE;
+ case MLX5_CAP_UMR_FENCE_SMALL:
+ return MLX5_FENCE_MODE_INITIATOR_SMALL;
+ default:
+ return MLX5_FENCE_MODE_STRONG_ORDERING;
+ }
+}
+
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
struct ib_srq_init_attr attr;
@@ -3680,8 +3692,10 @@
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
- dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
- dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+ dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+ dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+ }
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
@@ -3693,6 +3707,8 @@
mlx5_ib_internal_fill_odp_caps(dev);
+ dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
if (MLX5_CAP_GEN(mdev, imaicl)) {
dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 38c877b..bdcf254 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -349,7 +349,7 @@
struct mlx5_ib_wq rq;
u8 sq_signal_bits;
- u8 fm_cache;
+ u8 next_fence;
struct mlx5_ib_wq sq;
/* serialize qp state modifications
@@ -654,6 +654,7 @@
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
+ u8 umr_fence;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 93959e1..ebb6768 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3738,24 +3738,6 @@
}
}
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
- if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
- wr->send_flags & IB_SEND_FENCE))
- return MLX5_FENCE_MODE_STRONG_ORDERING;
-
- if (unlikely(fence)) {
- if (wr->send_flags & IB_SEND_FENCE)
- return MLX5_FENCE_MODE_SMALL_AND_FENCE;
- else
- return fence;
- } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
- return MLX5_FENCE_MODE_FENCE;
- }
-
- return 0;
-}
-
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl,
struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@
static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id,
- int nreq, u8 fence, u8 next_fence,
- u32 mlx5_opcode)
+ int nreq, u8 fence, u32 mlx5_opcode)
{
u8 opmod = 0;
@@ -3793,7 +3774,6 @@
mlx5_opcode | ((u32)opmod << 24));
ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
ctrl->fm_ce_se |= fence;
- qp->fm_cache = next_fence;
if (unlikely(qp->wq_sig))
ctrl->signature = wq_sig(ctrl);
@@ -3853,7 +3833,6 @@
goto out;
}
- fence = qp->fm_cache;
num_sge = wr->num_sge;
if (unlikely(num_sge > qp->sq.max_gs)) {
mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@
goto out;
}
+ if (wr->opcode == IB_WR_LOCAL_INV ||
+ wr->opcode == IB_WR_REG_MR) {
+ fence = dev->umr_fence;
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ } else if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
+
switch (ibqp->qp_type) {
case IB_QPT_XRC_INI:
xrc = seg;
@@ -3896,7 +3888,6 @@
goto out;
case IB_WR_LOCAL_INV:
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@
break;
case IB_WR_REG_MR:
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
qp->sq.wr_data[idx] = IB_WR_REG_MR;
ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@
goto out;
}
- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
- nreq, get_fence(fence, wr),
- next_fence, MLX5_OPCODE_UMR);
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+ fence, MLX5_OPCODE_UMR);
/*
* SET_PSV WQEs are not signaled and solicited
* on error
@@ -3954,9 +3943,8 @@
goto out;
}
- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
- nreq, get_fence(fence, wr),
- next_fence, MLX5_OPCODE_SET_PSV);
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+ fence, MLX5_OPCODE_SET_PSV);
err = begin_wqe(qp, &seg, &ctrl, wr,
&idx, &size, nreq);
if (err) {
@@ -3966,7 +3954,6 @@
goto out;
}
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
mr->sig->psv_wire.psv_idx, &seg,
&size);
@@ -3976,9 +3963,9 @@
goto out;
}
- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
- nreq, get_fence(fence, wr),
- next_fence, MLX5_OPCODE_SET_PSV);
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+ fence, MLX5_OPCODE_SET_PSV);
+ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
num_sge = 0;
goto skip_psv;
@@ -4089,8 +4076,8 @@
}
}
- finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
- get_fence(fence, wr), next_fence,
+ qp->next_fence = next_fence;
+ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
mlx5_ib_opcode[wr->opcode]);
skip_psv:
if (0)
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index fb983df..30b256a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -610,7 +610,6 @@
ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
}
ctrl_ird |= IETF_PEER_TO_PEER;
- ctrl_ird |= IETF_FLPDU_ZERO_LEN;
switch (mpa_key) {
case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@
type = NES_CM_EVENT_CONNECTED;
cm_node->state = NES_CM_STATE_TSA;
}
-
+ send_ack(cm_node, NULL);
break;
default:
WARN_ON(1);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index aa08c76..d961f79 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,7 +58,10 @@
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_GSI " GSI"
-#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT (12)
struct qedr_dev;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 3d7705c..d86dbe8 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -270,11 +270,13 @@
return rc;
}
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
- if (vlan_id < VLAN_CFI_MASK)
- has_vlan = true;
- if (sgid_attr.ndev)
+ if (sgid_attr.ndev) {
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ if (vlan_id < VLAN_CFI_MASK)
+ has_vlan = true;
+
dev_put(sgid_attr.ndev);
+ }
if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 17685cf..d6723c3 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
struct qedr_pbl *pbl,
- struct qedr_pbl_info *pbl_info)
+ struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ u32 fw_pg_cnt, fw_pg_per_umem_pg;
struct qedr_pbl *pbl_tbl;
struct scatterlist *sg;
struct regpair *pbe;
+ u64 pg_addr;
int entry;
- u32 addr;
if (!pbl_info->num_pbes)
return;
@@ -683,29 +684,35 @@
shift = umem->page_shift;
+ fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
pages = sg_dma_len(sg) >> shift;
+ pg_addr = sg_dma_address(sg);
for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- /* store the page address in pbe */
- pbe->lo = cpu_to_le32(sg_dma_address(sg) +
- (pg_cnt << shift));
- addr = upper_32_bits(sg_dma_address(sg) +
- (pg_cnt << shift));
- pbe->hi = cpu_to_le32(addr);
- pbe_cnt++;
- total_num_pbes++;
- pbe++;
+ for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+ pbe->lo = cpu_to_le32(pg_addr);
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
- if (total_num_pbes == pbl_info->num_pbes)
- return;
+ pg_addr += BIT(pg_shift);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
- /* If the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct regpair *)pbl_tbl->va;
- pbe_cnt = 0;
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt ==
+ (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+
+ fw_pg_cnt++;
}
}
}
@@ -754,7 +761,7 @@
u64 buf_addr, size_t buf_len,
int access, int dmasync)
{
- int page_cnt;
+ u32 fw_pages;
int rc;
q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@
return PTR_ERR(q->umem);
}
- page_cnt = ib_umem_page_count(q->umem);
- rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+ fw_pages = ib_umem_page_count(q->umem) <<
+ (q->umem->page_shift - FW_PAGE_SHIFT);
+
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
if (rc)
goto err0;
@@ -777,7 +786,8 @@
goto err0;
}
- qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+ FW_PAGE_SHIFT);
return 0;
@@ -2226,7 +2236,7 @@
goto err1;
qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
- &mr->info.pbl_info);
+ &mr->info.pbl_info, mr->umem->page_shift);
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
@@ -3209,6 +3219,10 @@
case IB_WC_REG_MR:
qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
break;
+ case IB_WC_RDMA_READ:
+ case IB_WC_SEND:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
default:
break;
}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index fc8b885..4ddbcac 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1956,8 +1956,10 @@
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
+ rvt_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecdba2f..1ac5b85 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -68,6 +68,7 @@
static inline u32 rxe_crc32(struct rxe_dev *rxe,
u32 crc, void *next, size_t len)
{
+ u32 retval;
int err;
SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@
return crc32_le(crc, next, len);
}
- return *(u32 *)shash_desc_ctx(shash);
+ retval = *(u32 *)shash_desc_ctx(shash);
+ barrier_data(shash_desc_ctx(shash));
+ return retval;
}
int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e..073e667 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@
sge = ibwr->sg_list;
for (i = 0; i < num_sge; i++, sge++) {
- if (qp->is_user && copy_from_user(p, (__user void *)
- (uintptr_t)sge->addr, sge->length))
- return -EFAULT;
-
- else if (!qp->is_user)
- memcpy(p, (void *)(uintptr_t)sge->addr,
- sge->length);
+ memcpy(p, (void *)(uintptr_t)sge->addr,
+ sge->length);
p += sge->length;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 874b243..7871379 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -178,7 +178,7 @@
static int ipoib_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct ipoib_dev_priv *priv = netdev_priv(netdev);
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
struct ib_port_attr attr;
int ret, speed, width;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f..efe7402 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
- napi_enable(&priv->napi);
ipoib_ib_dev_stop(dev);
return -1;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2869d1a..1015a63 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1590,12 +1590,14 @@
wait_for_completion(&priv->ntbl.deleted);
}
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_transport_dev_cleanup(dev);
+ netif_napi_del(&priv->napi);
+
ipoib_cm_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@
kfree(priv->rx_ring);
out:
+ netif_napi_del(&priv->napi);
return -ENOMEM;
}
@@ -2237,6 +2240,7 @@
device_init_failed:
free_netdev(priv->dev);
+ kfree(priv);
alloc_mem_failed:
return ERR_PTR(result);
@@ -2277,7 +2281,7 @@
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
- struct ipoib_dev_priv *priv, *tmp;
+ struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
if (!dev_list)
@@ -2300,7 +2304,14 @@
flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
- free_netdev(priv->dev);
+ if (device->free_rdma_netdev)
+ device->free_rdma_netdev(priv->dev);
+ else
+ free_netdev(priv->dev);
+
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+ kfree(cpriv);
+
kfree(priv);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fc..081b33d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (!priv)
return -ENOMEM;
- if (!rtnl_trylock())
- return restart_syscall();
-
down_write(&ppriv->vlan_rwsem);
/*
@@ -167,8 +167,10 @@
rtnl_unlock();
- if (result)
+ if (result) {
free_netdev(priv->dev);
+ kfree(priv);
+ }
return result;
}
@@ -209,6 +211,7 @@
if (dev) {
free_netdev(dev);
+ kfree(priv);
return 0;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index def723a..2354c74 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -320,7 +320,7 @@
ch->path.sgid = target->sgid;
ch->path.dgid = target->orig_dgid;
ch->path.pkey = target->pkey;
- sa_path_set_service_id(&ch->path, target->service_id);
+ ch->path.service_id = target->service_id;
return 0;
}
@@ -575,7 +575,7 @@
return 0;
err_qp:
- srp_destroy_qp(ch, qp);
+ ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index 485900f..abc266e 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -213,7 +213,7 @@
/* led device */
touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME;
touchkey->led_dev.brightness = LED_FULL;
- touchkey->led_dev.max_brightness = LED_FULL;
+ touchkey->led_dev.max_brightness = LED_ON;
touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set;
error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index f11807d..400869e 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -256,6 +256,42 @@
return 0;
}
+#ifdef CONFIG_ACPI
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+ struct platform_device *pdev)
+{
+ unsigned long long hrv = 0;
+ acpi_status status;
+
+ if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
+ axp20x_pek->axp20x->variant == AXP288_ID) {
+ status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
+ "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
+
+ /*
+ * On Cherry Trail platforms (hrv == 3), do not register the
+ * input device if there is an "INTCFD9" or "ACPI0011" gpio
+ * button ACPI device, as that handles the power button too,
+ * and otherwise we end up reporting all presses twice.
+ */
+ if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
+ acpi_dev_present("ACPI0011", NULL, -1)))
+ return false;
+
+ }
+
+ return true;
+}
+#else
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+ struct platform_device *pdev)
+{
+ return true;
+}
+#endif
+
static int axp20x_pek_probe(struct platform_device *pdev)
{
struct axp20x_pek *axp20x_pek;
@@ -268,13 +304,7 @@
axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
- /*
- * Do not register the input device if there is an "INTCFD9"
- * gpio button ACPI device, that handles the power button too,
- * and otherwise we end up reporting all presses twice.
- */
- if (!acpi_dev_found("INTCFD9") ||
- !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) {
+ if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
return error;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e73d968..f1fa1f1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,8 +1118,10 @@
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@@ -1525,6 +1527,13 @@
},
},
{
+ /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1546,6 +1555,13 @@
},
},
{
+ /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 131df9d..16c3046 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -176,6 +176,12 @@
NULL
};
+static const char * const forcepad_pnp_ids[] = {
+ "SYN300D",
+ "SYN3014",
+ NULL
+};
+
/*
* Send a command to the synpatics touchpad by special commands
*/
@@ -397,6 +403,8 @@
{
int error;
+ memset(info, 0, sizeof(*info));
+
error = synaptics_identify(psmouse, info);
if (error)
return error;
@@ -480,13 +488,6 @@
{ }
};
-/* This list has been kindly provided by Synaptics. */
-static const char * const forcepad_pnp_ids[] = {
- "SYN300D",
- "SYN3014",
- NULL
-};
-
/*****************************************************************************
* Synaptics communications functions
****************************************************************************/
@@ -1687,7 +1688,8 @@
SYNAPTICS_INTERTOUCH_ON,
};
-static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET;
+static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ?
+ SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF;
module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644);
MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device.");
@@ -1737,8 +1739,16 @@
if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) {
if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
- !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids))
+ !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) {
+
+ if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids))
+ psmouse_info(psmouse,
+ "Your touchpad (%s) says it can support a different bus. "
+ "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n",
+ psmouse->ps2dev.serio->firmware_id);
+
return -ENXIO;
+ }
}
psmouse_info(psmouse, "Trying to set up SMBus access\n");
@@ -1810,6 +1820,15 @@
}
if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) {
+ if ((!IS_ENABLED(CONFIG_RMI4_SMB) ||
+ !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) &&
+ /* Forcepads need F21, which is not ready */
+ !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) {
+ psmouse_warn(psmouse,
+ "The touchpad can support a better bus than the too old PS/2 protocol. "
+ "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n");
+ }
+
error = synaptics_setup_intertouch(psmouse, &info, true);
if (!error)
return PSMOUSE_SYNAPTICS_SMBUS;
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 77dad04..ad71a5e 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -146,7 +146,7 @@
if (!serio)
return -ENOMEM;
- serio->id.type = SERIO_8042;
+ serio->id.type = SERIO_PS_PSTHRU;
serio->write = rmi_f03_pt_write;
serio->port_data = f03;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 813dd68..0dbcf10 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -526,6 +526,7 @@
{
struct i2c_client *client = to_i2c_client(dev);
+ disable_irq(client->irq);
silead_ts_set_power(client, SILEAD_POWER_OFF);
return 0;
}
@@ -551,6 +552,8 @@
return -ENODEV;
}
+ enable_irq(client->irq);
+
return 0;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 9f44ee8..19779b8 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -118,6 +118,7 @@
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
+ !of_device_is_available(iommu_spec->np) ||
(!ops && !of_iommu_driver_present(iommu_spec->np)))
return NULL;
@@ -236,6 +237,12 @@
ops = ERR_PTR(err);
}
+ /* Ignore all other errors apart from EPROBE_DEFER */
+ if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+ dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+ ops = NULL;
+ }
+
return ops;
}
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5f..72a391e 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae17..f728755 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index d07dd51..8aa158a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@
id);
return NULL;
} else {
- rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+ rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
if (!rs)
return NULL;
rs->state = CCPResetIdle;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 8b7faea..422dced 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -75,7 +75,7 @@
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
- cskb = skb_copy(skb, GFP_KERNEL);
+ cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 1548259..2cfd938 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -242,7 +242,7 @@
spin_lock_irqsave(lock, flags);
val = bcm6328_led_read(addr);
- val |= (BIT(reg) << (((sel % 4) * 4) + 16));
+ val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16));
bcm6328_led_write(addr, val);
spin_unlock_irqrestore(lock, flags);
}
@@ -269,7 +269,7 @@
spin_lock_irqsave(lock, flags);
val = bcm6328_led_read(addr);
- val |= (BIT(reg) << ((sel % 4) * 4));
+ val |= (BIT(reg % 4) << ((sel % 4) * 4));
bcm6328_led_write(addr, val);
spin_unlock_irqrestore(lock, flags);
}
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index afa3b40..e95ea65 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -20,7 +20,6 @@
#include <linux/sched/loadavg.h>
#include <linux/leds.h>
#include <linux/reboot.h>
-#include <linux/suspend.h>
#include "../leds.h"
static int panic_heartbeats;
@@ -163,30 +162,6 @@
.deactivate = heartbeat_trig_deactivate,
};
-static int heartbeat_pm_notifier(struct notifier_block *nb,
- unsigned long pm_event, void *unused)
-{
- int rc;
-
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- led_trigger_unregister(&heartbeat_led_trigger);
- break;
- case PM_POST_SUSPEND:
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- rc = led_trigger_register(&heartbeat_led_trigger);
- if (rc)
- pr_err("could not re-register heartbeat trigger\n");
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
static int heartbeat_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
@@ -201,10 +176,6 @@
return NOTIFY_DONE;
}
-static struct notifier_block heartbeat_pm_nb = {
- .notifier_call = heartbeat_pm_notifier,
-};
-
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
@@ -221,14 +192,12 @@
atomic_notifier_chain_register(&panic_notifier_list,
&heartbeat_panic_nb);
register_reboot_notifier(&heartbeat_reboot_nb);
- register_pm_notifier(&heartbeat_pm_nb);
}
return rc;
}
static void __exit heartbeat_trig_exit(void)
{
- unregister_pm_notifier(&heartbeat_pm_nb);
unregister_reboot_notifier(&heartbeat_reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list,
&heartbeat_panic_nb);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 212a677..87edc34 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5174,6 +5174,18 @@
static void no_op(struct percpu_ref *r) {}
+int mddev_init_writes_pending(struct mddev *mddev)
+{
+ if (mddev->writes_pending.percpu_count_ptr)
+ return 0;
+ if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
+ return -ENOMEM;
+ /* We want to start with the refcount at zero */
+ percpu_ref_put(&mddev->writes_pending);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
+
static int md_alloc(dev_t dev, char *name)
{
/*
@@ -5239,10 +5251,6 @@
blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
- if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
- goto abort;
- /* We want to start with the refcount at zero */
- percpu_ref_put(&mddev->writes_pending);
disk = alloc_disk(1 << shift);
if (!disk) {
blk_cleanup_queue(mddev->queue);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 11f1514..0fa1de4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -648,6 +648,7 @@
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
+extern int mddev_init_writes_pending(struct mddev *mddev);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af5056d..e1a7e3d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3063,6 +3063,8 @@
mdname(mddev));
return -EIO;
}
+ if (mddev_init_writes_pending(mddev) < 0)
+ return -ENOMEM;
/*
* copy the already verified devices into our private RAID1
* bookkeeping area. [whatever we allocate in run(),
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4343d7f..797ed60 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3611,6 +3611,9 @@
int first = 1;
bool discard_supported = false;
+ if (mddev_init_writes_pending(mddev) < 0)
+ return -ENOMEM;
+
if (mddev->private == NULL) {
conf = setup_conf(mddev);
if (IS_ERR(conf))
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7220646..ec0f951 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7118,6 +7118,9 @@
long long min_offset_diff = 0;
int first = 1;
+ if (mddev_init_writes_pending(mddev) < 0)
+ return -ENOMEM;
+
if (mddev->recovery_cp != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b72edd2..55d9c2b 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,6 +2,12 @@
# Multimedia device configuration
#
+config CEC_CORE
+ tristate
+
+config CEC_NOTIFIER
+ bool
+
menuconfig MEDIA_SUPPORT
tristate "Multimedia support"
depends on HAS_IOMEM
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 523fea3..044503a 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -4,8 +4,6 @@
media-objs := media-device.o media-devnode.o media-entity.o
-obj-$(CONFIG_CEC_CORE) += cec/
-
#
# I2C drivers should come before other drivers, otherwise they'll fail
# when compiled as builtin drivers
@@ -26,6 +24,8 @@
# There are both core and drivers at RC subtree - merge before drivers
obj-y += rc/
+obj-$(CONFIG_CEC_CORE) += cec/
+
#
# Finally, merge the drivers that require the core
#
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index f944d93..43428cec 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,19 +1,6 @@
-config CEC_CORE
- tristate
- depends on MEDIA_CEC_SUPPORT
- default y
-
-config MEDIA_CEC_NOTIFIER
- bool
-
config MEDIA_CEC_RC
bool "HDMI CEC RC integration"
depends on CEC_CORE && RC_CORE
+ depends on CEC_CORE=m || RC_CORE=y
---help---
Pass on CEC remote control messages to the RC framework.
-
-config MEDIA_CEC_DEBUG
- bool "HDMI CEC debugfs interface"
- depends on CEC_CORE && DEBUG_FS
- ---help---
- Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 402a6c6..eaf408e 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,6 +1,6 @@
cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
-ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y)
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
cec-objs += cec-notifier.o
endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f5fe01c..9dfc798 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1864,7 +1864,7 @@
WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
}
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
/*
* Log the current state of the CEC adapter.
* Very useful for debugging.
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 0860fb4..999926f 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -271,16 +271,10 @@
bool block, struct cec_msg __user *parg)
{
struct cec_msg msg = {};
- long err = 0;
+ long err;
if (copy_from_user(&msg, parg, sizeof(msg)))
return -EFAULT;
- mutex_lock(&adap->lock);
- if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
- err = -ENONET;
- mutex_unlock(&adap->lock);
- if (err)
- return err;
err = cec_receive_msg(fh, &msg, block);
if (err)
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index f9ebff9..2f87748 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -187,7 +187,7 @@
put_device(&devnode->dev);
}
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
{
cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@
}
dev_set_drvdata(&adap->devnode.dev, adap);
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
if (!top_cec_dir)
return 0;
@@ -355,7 +355,7 @@
adap->rc = NULL;
#endif
debugfs_remove_recursive(adap->cec_dir);
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
if (adap->notifier)
cec_notifier_unregister(adap->notifier);
#endif
@@ -395,7 +395,7 @@
return ret;
}
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
top_cec_dir = debugfs_create_dir("cec", NULL);
if (IS_ERR_OR_NULL(top_cec_dir)) {
pr_warn("cec: Failed to create debugfs cec dir\n");
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index fd181c9..aaa9471 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -220,7 +220,8 @@
config VIDEO_ADV7604_CEC
bool "Enable Analog Devices ADV7604 CEC support"
- depends on VIDEO_ADV7604 && CEC_CORE
+ depends on VIDEO_ADV7604
+ select CEC_CORE
---help---
When selected the adv7604 will support the optional
HDMI CEC feature.
@@ -240,7 +241,8 @@
config VIDEO_ADV7842_CEC
bool "Enable Analog Devices ADV7842 CEC support"
- depends on VIDEO_ADV7842 && CEC_CORE
+ depends on VIDEO_ADV7842
+ select CEC_CORE
---help---
When selected the adv7842 will support the optional
HDMI CEC feature.
@@ -478,7 +480,8 @@
config VIDEO_ADV7511_CEC
bool "Enable Analog Devices ADV7511 CEC support"
- depends on VIDEO_ADV7511 && CEC_CORE
+ depends on VIDEO_ADV7511
+ select CEC_CORE
---help---
When selected the adv7511 will support the optional
HDMI CEC feature.
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index acef4ec..3251cba 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -223,7 +223,7 @@
static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
u8 mask, u8 val)
{
- i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
+ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
}
static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ac026ee..041cb80 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -501,8 +501,9 @@
config VIDEO_SAMSUNG_S5P_CEC
tristate "Samsung S5P CEC driver"
- depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
- select MEDIA_CEC_NOTIFIER
+ depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
---help---
This is a driver for Samsung S5P HDMI CEC interface. It uses the
generic CEC framework interface.
@@ -511,8 +512,9 @@
config VIDEO_STI_HDMI_CEC
tristate "STMicroelectronics STiH4xx HDMI CEC driver"
- depends on CEC_CORE && (ARCH_STI || COMPILE_TEST)
- select MEDIA_CEC_NOTIFIER
+ depends on ARCH_STI || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
---help---
This is a driver for STIH4xx HDMI CEC interface. It uses the
generic CEC framework interface.
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index b36ac19..154de92 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -26,7 +26,8 @@
config VIDEO_VIVID_CEC
bool "Enable CEC emulation support"
- depends on VIDEO_VIVID && CEC_CORE
+ depends on VIDEO_VIVID
+ select CEC_CORE
---help---
When selected the vivid module will emulate the optional
HDMI CEC feature.
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 90f66dc..a2fc1a1 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -211,7 +211,7 @@
*/
void ir_raw_event_handle(struct rc_dev *dev)
{
- if (!dev->raw)
+ if (!dev->raw || !dev->raw->thread)
return;
wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@
{
int rc;
struct ir_raw_handler *handler;
+ struct task_struct *thread;
if (!dev)
return -EINVAL;
@@ -507,13 +508,15 @@
* because the event is coming from userspace
*/
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
- dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
- "rc%u", dev->minor);
+ thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
+ dev->minor);
- if (IS_ERR(dev->raw->thread)) {
- rc = PTR_ERR(dev->raw->thread);
+ if (IS_ERR(thread)) {
+ rc = PTR_ERR(thread);
goto out;
}
+
+ dev->raw->thread = thread;
}
mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index e12ec50..90a5f8f 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -183,9 +183,15 @@
static unsigned long delt;
unsigned long deltintr;
unsigned long flags;
+ int counter = 0;
int iir, lsr;
while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
+ if (++counter > 256) {
+ dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
+ break;
+ }
+
switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */
case UART_IIR_MSI:
(void)inb(io + UART_MSR);
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 8937f39..18ead44 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,7 @@
config USB_PULSE8_CEC
tristate "Pulse Eight HDMI CEC"
- depends on USB_ACM && CEC_CORE
+ depends on USB_ACM
+ select CEC_CORE
select SERIO
select SERIO_SERPORT
---help---
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
index 3eb8660..030ef01 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -1,6 +1,7 @@
config USB_RAINSHADOW_CEC
tristate "RainShadow Tech HDMI CEC"
- depends on USB_ACM && CEC_CORE
+ depends on USB_ACM
+ select CEC_CORE
select SERIO
select SERIO_SERPORT
---help---
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 541ca54..4126552 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -119,7 +119,7 @@
while (true) {
unsigned long flags;
- bool exit_loop;
+ bool exit_loop = false;
char data;
spin_lock_irqsave(&rain->buf_lock, flags);
@@ -336,6 +336,7 @@
serio_set_drvdata(serio, rain);
INIT_WORK(&rain->work, rain_irq_work_handler);
mutex_init(&rain->write_lock);
+ spin_lock_init(&rain->buf_lock);
err = serio_open(serio, drv);
if (err)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 94afbbf9..c0175ea 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 35910f9..99e644c 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -581,7 +581,7 @@
return of_platform_populate(np, NULL, NULL, dev);
}
-static int atmel_ebi_resume(struct device *dev)
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
{
struct atmel_ebi *ebi = dev_get_drvdata(dev);
struct atmel_ebi_dev *ebid;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 17b433f..0761271 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -159,11 +159,8 @@
/* Do this outside the status_mutex to avoid a circular dependency with
* the locking in cxl_mmap_fault() */
- if (copy_from_user(&work, uwork,
- sizeof(struct cxl_ioctl_start_work))) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&work, uwork, sizeof(work)))
+ return -EFAULT;
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 871a2f0..8d6ea97 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -1302,13 +1302,16 @@
void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
- if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+ if (adapter->native->err_virq == 0 ||
+ adapter->native->err_virq !=
+ irq_find_mapping(NULL, adapter->native->err_hwirq))
return;
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
cxl_unmap_irq(adapter->native->err_virq, adapter);
cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
kfree(adapter->irq_name);
+ adapter->native->err_virq = 0;
}
int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@
void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
- if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+ if (afu->serr_virq == 0 ||
+ afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
return;
cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
cxl_unmap_irq(afu->serr_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
kfree(afu->err_irq_name);
+ afu->serr_virq = 0;
}
int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@
void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
- if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+ if (afu->native->psl_virq == 0 ||
+ afu->native->psl_virq !=
+ irq_find_mapping(NULL, afu->native->psl_hwirq))
return;
cxl_unmap_irq(afu->native->psl_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
kfree(afu->psl_irq_name);
+ afu->native->psl_virq = 0;
}
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index d1928fd..07aad85 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -763,8 +763,10 @@
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+ u8 version = mei_me_cl_ver(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
+ return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
+ cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 1842ed3..de962c2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -210,6 +210,15 @@
int i;
bool use_desc_chain_mode = true;
+ /*
+ * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
+ * reported. For some strange reason this occurs in descriptor
+ * chain mode only. So let's fall back to bounce buffer mode
+ * for command SD_IO_RW_EXTENDED.
+ */
+ if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
+ return;
+
for_each_sg(data->sg, sg, data->sg_len, i)
/* check for 8 byte alignment */
if (sg->offset & 7) {
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d474378..b1dd127 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -202,7 +202,7 @@
return 0;
}
-const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
.ecc = nand_ooblayout_ecc_lp_hamming,
.free = nand_ooblayout_free_lp_hamming,
};
@@ -4361,7 +4361,7 @@
/* Initialize the ->data_interface field. */
ret = nand_init_data_interface(chip);
if (ret)
- return ret;
+ goto err_nand_init;
/*
* Setup the data interface correctly on the chip and controller side.
@@ -4373,7 +4373,7 @@
*/
ret = nand_setup_data_interface(chip);
if (ret)
- return ret;
+ goto err_nand_init;
nand_maf_id = chip->id.data[0];
nand_dev_id = chip->id.data[1];
@@ -4404,6 +4404,12 @@
mtd->size = i * chip->chipsize;
return 0;
+
+err_nand_init:
+ /* Free manufacturer priv data. */
+ nand_manufacturer_cleanup(chip);
+
+ return ret;
}
EXPORT_SYMBOL(nand_scan_ident);
@@ -4574,18 +4580,23 @@
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
- !(chip->bbt_options & NAND_BBT_USE_FLASH)))
- return -EINVAL;
+ !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+ ret = -EINVAL;
+ goto err_ident;
+ }
if (invalid_ecc_page_accessors(chip)) {
pr_err("Invalid ECC page accessors setup\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_ident;
}
if (!(chip->options & NAND_OWN_BUFFERS)) {
nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
- if (!nbuf)
- return -ENOMEM;
+ if (!nbuf) {
+ ret = -ENOMEM;
+ goto err_ident;
+ }
nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
if (!nbuf->ecccalc) {
@@ -4608,8 +4619,10 @@
chip->buffers = nbuf;
} else {
- if (!chip->buffers)
- return -ENOMEM;
+ if (!chip->buffers) {
+ ret = -ENOMEM;
+ goto err_ident;
+ }
}
/* Set the internal oob buffer location, just after the page data */
@@ -4842,7 +4855,11 @@
return 0;
/* Build bad block table */
- return chip->scan_bbt(mtd);
+ ret = chip->scan_bbt(mtd);
+ if (ret)
+ goto err_free;
+ return 0;
+
err_free:
if (nbuf) {
kfree(nbuf->databuf);
@@ -4850,6 +4867,13 @@
kfree(nbuf->ecccalc);
kfree(nbuf);
}
+
+err_ident:
+ /* Clean up nand_scan_ident(). */
+
+ /* Free manufacturer priv data. */
+ nand_manufacturer_cleanup(chip);
+
return ret;
}
EXPORT_SYMBOL(nand_scan_tail);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 9d5ca0e..92e2cf8 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -6,7 +6,6 @@
* published by the Free Software Foundation.
*
*/
-#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/sizes.h>
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index 9cfc403..1e07559 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -84,6 +84,9 @@
case 7:
chip->ecc_strength_ds = 60;
break;
+ default:
+ WARN(1, "Could not decode ECC info");
+ chip->ecc_step_ds = 0;
}
}
} else {
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 05b6e10..49b286c 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -55,10 +55,10 @@
* byte 1 for other packets in the page (PKT_N, for N > 0)
* ERR_COUNT_PKT_N is the max error count over all but the first packet.
*/
-#define DECODE_OK_PKT_0(v) ((v) & BIT(7))
-#define DECODE_OK_PKT_N(v) ((v) & BIT(15))
#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
+#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0)
+#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0)
/* Offsets relative to pbus_base */
#define PBUS_CS_CTRL 0x83c
@@ -193,6 +193,8 @@
chip->ecc.strength);
if (res < 0)
mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += res;
bitflips = max(res, bitflips);
buf += pkt_size;
@@ -202,9 +204,11 @@
return bitflips;
}
-static int decode_error_report(struct tango_nfc *nfc)
+static int decode_error_report(struct nand_chip *chip)
{
u32 status, res;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
if (status & PAGE_IS_EMPTY)
@@ -212,10 +216,14 @@
res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
- if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
- return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+ if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
+ return -EBADMSG;
- return -EBADMSG;
+ /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
+ mtd->ecc_stats.corrected +=
+ ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
+
+ return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
}
static void tango_dma_callback(void *arg)
@@ -282,7 +290,7 @@
if (err)
return err;
- res = decode_error_report(nfc);
+ res = decode_error_report(chip);
if (res < 0) {
chip->ecc.read_oob_raw(mtd, chip, page);
res = check_erased_page(chip, buf);
@@ -663,6 +671,7 @@
{ .compatible = "sigma,smp8758-nand" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, tango_nand_ids);
static struct platform_driver tango_nand_driver = {
.probe = tango_nand_probe,
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b44a6ae..e5386ab 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -90,10 +90,13 @@
AD_LINK_SPEED_100MBPS,
AD_LINK_SPEED_1000MBPS,
AD_LINK_SPEED_2500MBPS,
+ AD_LINK_SPEED_5000MBPS,
AD_LINK_SPEED_10000MBPS,
+ AD_LINK_SPEED_14000MBPS,
AD_LINK_SPEED_20000MBPS,
AD_LINK_SPEED_25000MBPS,
AD_LINK_SPEED_40000MBPS,
+ AD_LINK_SPEED_50000MBPS,
AD_LINK_SPEED_56000MBPS,
AD_LINK_SPEED_100000MBPS,
};
@@ -259,10 +262,13 @@
* %AD_LINK_SPEED_100MBPS,
* %AD_LINK_SPEED_1000MBPS,
* %AD_LINK_SPEED_2500MBPS,
+ * %AD_LINK_SPEED_5000MBPS,
* %AD_LINK_SPEED_10000MBPS
+ * %AD_LINK_SPEED_14000MBPS,
* %AD_LINK_SPEED_20000MBPS
* %AD_LINK_SPEED_25000MBPS
* %AD_LINK_SPEED_40000MBPS
+ * %AD_LINK_SPEED_50000MBPS
* %AD_LINK_SPEED_56000MBPS
* %AD_LINK_SPEED_100000MBPS
*/
@@ -296,10 +302,18 @@
speed = AD_LINK_SPEED_2500MBPS;
break;
+ case SPEED_5000:
+ speed = AD_LINK_SPEED_5000MBPS;
+ break;
+
case SPEED_10000:
speed = AD_LINK_SPEED_10000MBPS;
break;
+ case SPEED_14000:
+ speed = AD_LINK_SPEED_14000MBPS;
+ break;
+
case SPEED_20000:
speed = AD_LINK_SPEED_20000MBPS;
break;
@@ -312,6 +326,10 @@
speed = AD_LINK_SPEED_40000MBPS;
break;
+ case SPEED_50000:
+ speed = AD_LINK_SPEED_50000MBPS;
+ break;
+
case SPEED_56000:
speed = AD_LINK_SPEED_56000MBPS;
break;
@@ -707,9 +725,15 @@
case AD_LINK_SPEED_2500MBPS:
bandwidth = nports * 2500;
break;
+ case AD_LINK_SPEED_5000MBPS:
+ bandwidth = nports * 5000;
+ break;
case AD_LINK_SPEED_10000MBPS:
bandwidth = nports * 10000;
break;
+ case AD_LINK_SPEED_14000MBPS:
+ bandwidth = nports * 14000;
+ break;
case AD_LINK_SPEED_20000MBPS:
bandwidth = nports * 20000;
break;
@@ -719,6 +743,9 @@
case AD_LINK_SPEED_40000MBPS:
bandwidth = nports * 40000;
break;
+ case AD_LINK_SPEED_50000MBPS:
+ bandwidth = nports * 50000;
+ break;
case AD_LINK_SPEED_56000MBPS:
bandwidth = nports * 56000;
break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2359478b..8ab6bdb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4192,7 +4192,6 @@
struct bonding *bond = netdev_priv(bond_dev);
if (bond->wq)
destroy_workqueue(bond->wq);
- free_netdev(bond_dev);
}
void bond_setup(struct net_device *bond_dev)
@@ -4212,7 +4211,8 @@
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;
- bond_dev->destructor = bond_destructor;
+ bond_dev->needs_free_netdev = true;
+ bond_dev->priv_destructor = bond_destructor;
SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
@@ -4736,7 +4736,7 @@
rtnl_unlock();
if (res < 0)
- bond_destructor(bond_dev);
+ free_netdev(bond_dev);
return res;
}
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index ddabce7..71a7c3b 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1121,7 +1121,7 @@
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->netdev_ops = &cfhsi_netdevops;
for (i = 0; i < CFHSI_PRIO_LAST; ++i)
skb_queue_head_init(&cfhsi->qhead[i]);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index c2dea49..76e1d35 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -428,7 +428,7 @@
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CAIF_MAX_MTU;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
skb_queue_head_init(&serdev->head);
serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
serdev->common.use_frag = true;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 3a529fb..fc21afe 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -712,7 +712,7 @@
dev->flags = IFF_NOARP | IFF_POINTOPOINT;
dev->priv_flags |= IFF_NO_QUEUE;
dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
skb_queue_head_init(&cfspi->qhead);
skb_queue_head_init(&cfspi->chead);
cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 6122768..1794ea0 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -617,7 +617,7 @@
netdev->tx_queue_len = 100;
netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
netdev->mtu = CFV_DEF_MTU_SIZE;
- netdev->destructor = free_netdev;
+ netdev->needs_free_netdev = true;
}
/* Create debugfs counters for the device */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 611d16a..ae4ed03 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -391,6 +391,9 @@
can_update_state_error_stats(dev, new_state);
priv->state = new_state;
+ if (!cf)
+ return;
+
if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
cf->can_id |= CAN_ERR_BUSOFF;
return;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 0d57be5..85268be 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -489,7 +489,7 @@
struct pucan_rx_msg *msg_list, int msg_count)
{
void *msg_ptr = msg_list;
- int i, msg_size;
+ int i, msg_size = 0;
for (i = 0; i < msg_count; i++) {
msg_size = peak_canfd_handle_msg(priv, msg_ptr);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index eb71737..6a6e896 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -417,7 +417,7 @@
static void slc_free_netdev(struct net_device *dev)
{
int i = dev->base_addr;
- free_netdev(dev);
+
slcan_devs[i] = NULL;
}
@@ -436,7 +436,8 @@
static void slc_setup(struct net_device *dev)
{
dev->netdev_ops = &slc_netdev_ops;
- dev->destructor = slc_free_netdev;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = slc_free_netdev;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -761,8 +762,6 @@
if (sl->tty) {
printk(KERN_ERR "%s: tty discipline still running\n",
dev->name);
- /* Intentionally leak the control block. */
- dev->destructor = NULL;
}
unregister_netdev(dev);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index eecee7f..afcc131 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@
sizeof(*dm),
1000);
+ kfree(dm);
+
return rc;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 57913db..1ca76e0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -908,8 +908,6 @@
const struct peak_usb_adapter *peak_usb_adapter = NULL;
int i, err = -ENOMEM;
- usb_dev = interface_to_usbdev(intf);
-
/* get corresponding PCAN-USB adapter */
for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
@@ -920,7 +918,7 @@
if (!peak_usb_adapter) {
/* should never come except device_id bad usage in this file */
pr_err("%s: didn't find device id. 0x%x in devices list\n",
- PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct);
+ PCAN_USB_DRIVER_NAME, usb_id_product);
return -ENODEV;
}
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index facca33..a8cb332 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -152,7 +152,7 @@
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CAN_MTU;
+ dev->mtu = CANFD_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
@@ -163,7 +163,7 @@
dev->flags |= IFF_ECHO;
dev->netdev_ops = &vcan_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
static struct rtnl_link_ops vcan_link_ops __read_mostly = {
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 7fbb247..cfe889e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -150,13 +150,13 @@
static void vxcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CAN_MTU;
+ dev->mtu = CANFD_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
dev->flags = (IFF_NOARP|IFF_ECHO);
dev->netdev_ops = &vxcan_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
/* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 96046bb..14c0be9 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -114,13 +114,13 @@
return -EOPNOTSUPP;
}
-int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
- int src_port, u16 data)
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+ int src_dev, int src_port, u16 data)
{
return -EOPNOTSUPP;
}
-int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 149244a..9905b52 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -328,7 +328,6 @@
struct dummy_priv *priv = netdev_priv(dev);
kfree(priv->vfinfo);
- free_netdev(dev);
}
static void dummy_setup(struct net_device *dev)
@@ -338,7 +337,8 @@
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
- dev->destructor = dummy_free_netdev;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = dummy_free_netdev;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 08d11ce..f5b237e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -232,11 +234,9 @@
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail, admin_queue->sq.head,
- admin_queue->q_depth);
+ pr_debug("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -508,15 +508,20 @@
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags;
- u32 start_time;
+ unsigned long flags, timeout;
int ret;
- start_time = ((u32)jiffies_to_usecs(jiffies));
+ timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
- while (comp_ctx->status == ENA_CMD_SUBMITTED) {
- if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
- ADMIN_CMD_TIMEOUT_US) {
+ while (1) {
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
+
+ if (time_is_before_jiffies(timeout)) {
pr_err("Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@
goto err;
}
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
msleep(100);
}
@@ -1455,6 +1456,12 @@
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 67b2338f..3ee55e2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -80,7 +80,6 @@
ENA_STAT_TX_ENTRY(tx_poll),
ENA_STAT_TX_ENTRY(doorbells),
ENA_STAT_TX_ENTRY(prepare_ctx_err),
- ENA_STAT_TX_ENTRY(missing_tx_comp),
ENA_STAT_TX_ENTRY(bad_req_id),
};
@@ -94,6 +93,7 @@
ENA_STAT_RX_ENTRY(dma_mapping_err),
ENA_STAT_RX_ENTRY(bad_desc_num),
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+ ENA_STAT_RX_ENTRY(empty_rx_ring),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7c1214d..4f16ed3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -190,6 +190,7 @@
rxr->sgl_size = adapter->max_rx_sgl_size;
rxr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+ rxr->empty_rx_queue = 0;
}
}
@@ -1078,6 +1079,26 @@
rx_ring->per_napi_bytes = 0;
}
+static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
+{
+ struct ena_eth_io_intr_reg intr_reg;
+
+ /* Update intr register: rx intr delay,
+ * tx intr delay and interrupt unmask
+ */
+ ena_com_update_intr_reg(&intr_reg,
+ rx_ring->smoothed_interval,
+ tx_ring->smoothed_interval,
+ true);
+
+ /* It is a shared MSI-X.
+ * Tx and Rx CQ have pointer to it.
+ * So we use one of them to reach the intr reg
+ */
+ ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+}
+
static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
@@ -1108,7 +1129,6 @@
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;
- struct ena_eth_io_intr_reg intr_reg;
u32 tx_work_done;
u32 rx_work_done;
@@ -1149,22 +1169,9 @@
if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
ena_adjust_intr_moderation(rx_ring, tx_ring);
- /* Update intr register: rx intr delay,
- * tx intr delay and interrupt unmask
- */
- ena_com_update_intr_reg(&intr_reg,
- rx_ring->smoothed_interval,
- tx_ring->smoothed_interval,
- true);
-
- /* It is a shared MSI-X.
- * Tx and Rx CQ have pointer to it.
- * So we use one of them to reach the intr reg
- */
- ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+ ena_unmask_interrupt(tx_ring, rx_ring);
}
-
ena_update_ring_numa_node(tx_ring, rx_ring);
ret = rx_work_done;
@@ -1485,6 +1492,11 @@
ena_napi_enable_all(adapter);
+ /* Enable completion queues interrupt */
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_unmask_interrupt(&adapter->tx_ring[i],
+ &adapter->rx_ring[i]);
+
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
@@ -1532,6 +1544,7 @@
"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
qid, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1596,6 +1609,7 @@
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
qid, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1981,6 +1995,7 @@
tx_info->tx_descs = nb_hw_desc;
tx_info->last_jiffies = jiffies;
+ tx_info->print_once = 0;
tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
tx_ring->ring_size);
@@ -2550,13 +2565,44 @@
"Reset attempt failed. Can not reset the device\n");
}
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static int check_missing_comp_in_queue(struct ena_adapter *adapter,
+ struct ena_ring *tx_ring)
{
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
+ u32 missed_tx = 0;
+ int i;
+
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ tx_buf = &tx_ring->tx_buffer_info[i];
+ last_jiffies = tx_buf->last_jiffies;
+ if (unlikely(last_jiffies &&
+ time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+ if (!tx_buf->print_once)
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+ tx_ring->qid, i);
+
+ tx_buf->print_once = 1;
+ missed_tx++;
+
+ if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+ netif_err(adapter, tx_err, adapter->netdev,
+ "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
struct ena_ring *tx_ring;
- int i, j, budget;
- u32 missed_tx;
+ int i, budget, rc;
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -2572,31 +2618,9 @@
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
- for (j = 0; j < tx_ring->ring_size; j++) {
- tx_buf = &tx_ring->tx_buffer_info[j];
- last_jiffies = tx_buf->last_jiffies;
- if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
- netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
- tx_ring->qid, j);
-
- u64_stats_update_begin(&tx_ring->syncp);
- missed_tx = tx_ring->tx_stats.missing_tx_comp++;
- u64_stats_update_end(&tx_ring->syncp);
-
- /* Clear last jiffies so the lost buffer won't
- * be counted twice.
- */
- tx_buf->last_jiffies = 0;
-
- if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
- netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
- missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- }
- }
- }
+ rc = check_missing_comp_in_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
budget--;
if (!budget)
@@ -2606,6 +2630,58 @@
adapter->last_monitored_tx_qid = i % adapter->num_queues;
}
+/* trigger napi schedule after 2 consecutive detections */
+#define EMPTY_RX_REFILL 2
+/* For the rare case where the device runs out of Rx descriptors and the
+ * napi handler failed to refill new Rx descriptors (due to a lack of memory
+ * for example).
+ * This case will lead to a deadlock:
+ * The device won't send interrupts since all the new Rx packets will be dropped
+ * The napi handler won't allocate new Rx descriptors so the device won't be
+ * able to send new packets.
+ *
+ * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
+ * It is recommended to have at least 512MB, with a minimum of 128MB for a
+ * constrained environment.
+ *
+ * When such a situation is detected - Reschedule napi
+ */
+static void check_for_empty_rx_ring(struct ena_adapter *adapter)
+{
+ struct ena_ring *rx_ring;
+ int i, refill_required;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return;
+
+ if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+ return;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+
+ refill_required =
+ ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
+ rx_ring->empty_rx_queue++;
+
+ if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.empty_rx_ring++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ netif_err(adapter, drv, adapter->netdev,
+ "trigger refill for ring %d\n", i);
+
+ napi_schedule(rx_ring->napi);
+ rx_ring->empty_rx_queue = 0;
+ }
+ } else {
+ rx_ring->empty_rx_queue = 0;
+ }
+ }
+}
+
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
@@ -2660,6 +2736,8 @@
check_for_missing_tx_completions(adapter);
+ check_for_empty_rx_ring(adapter);
+
if (debug_area)
ena_dump_stats_to_buf(adapter, debug_area);
@@ -2840,6 +2918,11 @@
{
int release_bars;
+ if (ena_dev->mem_bar)
+ devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+ devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
}
@@ -2927,8 +3010,9 @@
goto err_free_ena_dev;
}
- ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
- pci_resource_len(pdev, ENA_REG_BAR));
+ ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, ENA_REG_BAR),
+ pci_resource_len(pdev, ENA_REG_BAR));
if (!ena_dev->reg_bar) {
dev_err(&pdev->dev, "failed to remap regs bar\n");
rc = -EFAULT;
@@ -2948,8 +3032,9 @@
ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
- pci_resource_len(pdev, ENA_MEM_BAR));
+ ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
if (!ena_dev->mem_bar) {
rc = -EFAULT;
goto err_device_destroy;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0e22bce..a4d3d5e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
#define DRV_MODULE_VER_MAJOR 1
#define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 7
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
@@ -146,7 +146,18 @@
u32 tx_descs;
/* num of buffers used by this skb */
u32 num_of_bufs;
- /* Save the last jiffies to detect missing tx packets */
+
+ /* Used when detecting missing tx packets to limit the number of prints */
+ u32 print_once;
+ /* Save the last jiffies to detect missing tx packets
+ *
+ * Set to a non-zero value on ena_start_xmit and set to zero on
+ * napi and the timer service routine.
+ *
+ * While this value is not protected by a lock,
+ * a given packet is not expected to be handled by ena_start_xmit
+ * and by napi/timer_service at the same time.
+ */
unsigned long last_jiffies;
struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
} ____cacheline_aligned;
@@ -170,7 +181,6 @@
u64 napi_comp;
u64 tx_poll;
u64 doorbells;
- u64 missing_tx_comp;
u64 bad_req_id;
};
@@ -184,6 +194,7 @@
u64 dma_mapping_err;
u64 bad_desc_num;
u64 rx_copybreak_pkt;
+ u64 empty_rx_ring;
};
struct ena_ring {
@@ -231,6 +242,7 @@
struct ena_stats_tx tx_stats;
struct ena_stats_rx rx_stats;
};
+ int empty_rx_queue;
} ____cacheline_aligned;
struct ena_stats_dev {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index b3bc87f..0a98c36 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -324,7 +324,7 @@
struct xgbe_ring *ring,
struct xgbe_ring_data *rdata)
{
- int order, ret;
+ int ret;
if (!ring->rx_hdr_pa.pages) {
ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@
}
if (!ring->rx_buf_pa.pages) {
- order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
- order);
+ PAGE_ALLOC_COSTLY_ORDER);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b8e3d88..a66aee5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -193,9 +193,6 @@
struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
-int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
- struct ethtool_cmd *cmd);
-
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
unsigned int power_state);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 099b374..5274501 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2026,9 +2026,12 @@
priv->num_rx_desc_words = params->num_rx_desc_words;
priv->irq0 = platform_get_irq(pdev, 0);
- if (!priv->is_lite)
+ if (!priv->is_lite) {
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
+ priv->wol_irq = platform_get_irq(pdev, 2);
+ } else {
+ priv->wol_irq = platform_get_irq(pdev, 1);
+ }
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index eccb3d1..f619c4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1926,7 +1926,7 @@
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3883,15 +3883,26 @@
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
+ u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
#endif
- tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(ntohs(eth->h_proto));
+ /* Still need to consider inband vlan for enforced */
+ if (__vlan_get_tag(skb, &vlan_tci)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ } else {
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tci);
+ }
#ifndef BNX2X_STOP_ON_ERROR
- else
+ } else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
#endif
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index bdfd53b..9ca994d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -901,6 +901,8 @@
/* release VF resources */
bnx2x_vf_free_resc(bp, vf);
+ vf->malicious = false;
+
/* re-open the mailbox */
bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
return;
@@ -1822,9 +1824,11 @@
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
case EVENT_RING_OPCODE_VF_FLR:
- case EVENT_RING_OPCODE_MALICIOUS_VF:
/* Do nothing for now */
return 0;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ vf->malicious = true;
+ return 0;
}
return 0;
@@ -1905,6 +1909,13 @@
continue;
}
+ if (vf->malicious) {
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d malicious so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
"add addresses for vf %d\n", vf->abs_vfid);
for_each_vfq(vf, j) {
@@ -3042,7 +3053,7 @@
{
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+ BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
sizeof(union pf_vf_bulletin));
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 888d0b6..53466f6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -141,6 +141,7 @@
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
bool flr_clnup_stage; /* true during flr cleanup */
+ bool malicious; /* true if FW indicated so, until FLR */
/* dma */
dma_addr_t fw_stat_map;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38a5c67..ea1bfcf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2196,10 +2196,14 @@
if (err)
goto irq_err;
}
+
+ mutex_lock(&uld_mutex);
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
+ mutex_unlock(&uld_mutex);
+
notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
@@ -2771,6 +2775,9 @@
{
int port;
+ if (pci_channel_offline(adap->pdev))
+ return;
+
/* Disable the SGE since ULDs are going to free resources that
* could be exposed to the adapter. RDMA MWs for example...
*/
@@ -3882,9 +3889,10 @@
spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
-
- netif_device_detach(dev);
- netif_carrier_off(dev);
+ if (dev) {
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+ }
}
spin_unlock(&adap->stats_lock);
disable_interrupts(adap);
@@ -3963,12 +3971,13 @@
rtnl_lock();
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
-
- if (netif_running(dev)) {
- link_start(dev);
- cxgb_set_rxmode(dev);
+ if (dev) {
+ if (netif_running(dev)) {
+ link_start(dev);
+ cxgb_set_rxmode(dev);
+ }
+ netif_device_attach(dev);
}
- netif_device_attach(dev);
}
rtnl_unlock();
}
@@ -4516,7 +4525,7 @@
/* Initialize the device structure. */
dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
static int config_mgmt_dev(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index aded42b96..3a34aa6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4557,8 +4557,13 @@
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
- u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ u32 whoami, pf;
+
+ if (pci_channel_offline(adapter->pdev))
+ return;
+
+ whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 3549d387..f2d623a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2B
+#define T4FW_VERSION_MICRO 0x2D
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2B
+#define T5FW_VERSION_MICRO 0x2D
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2B
+#define T6FW_VERSION_MICRO 0x2D
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e863ba7..8bb0db9 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -739,6 +739,8 @@
if (ret)
return ret;
+ napi_enable(&priv->napi);
+
ethoc_init_ring(priv, dev->mem_start);
ethoc_reset(priv);
@@ -754,7 +756,6 @@
priv->old_duplex = -1;
phy_start(dev->phydev);
- napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) {
dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b3..a10de1e 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@
{
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
- const struct fsl_pq_mdio_data *data = id->data;
+ const struct fsl_pq_mdio_data *data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
@@ -389,6 +389,13 @@
struct mii_bus *new_bus;
int err;
+ if (!id) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
+
+ data = id->data;
+
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 508923f..259e69a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -343,6 +343,7 @@
{
struct emac_regs __iomem *p = dev->emacp;
int n = 20;
+ bool __maybe_unused try_internal_clock = false;
DBG(dev, "reset" NL);
@@ -355,6 +356,7 @@
}
#ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
/*
* PPC460EX/GT Embedded Processor Advanced User's Manual
* section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -362,10 +364,19 @@
* of the EMAC. If none is present, select the internal clock
* (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
* After a soft reset, select the external clock.
+ *
+ * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the
+ * ethernet cable is not attached. This causes the reset to time out
+ * and the PHY detection code in emac_init_phy() is unable to
+ * communicate and detect the AR8035-A PHY. As a result, the emac
+ * driver bails out early and the user has no ethernet.
+ * In order to stay compatible with existing configurations, the
+ * driver will temporarily switch to the internal clock, after
+ * the first reset fails.
*/
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: select internal loop clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -383,8 +394,15 @@
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (!n && !try_internal_clock) {
+ /* first attempt has timed out. */
+ n = 20;
+ try_internal_clock = true;
+ goto do_retry;
+ }
+
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: restore external clock source after reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
@@ -2460,20 +2478,24 @@
return emac_reset(dev);
}
+static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
+ struct phy_device *phy_dev)
+{
+ phy_dev->autoneg = phy->autoneg;
+ phy_dev->speed = phy->speed;
+ phy_dev->duplex = phy->duplex;
+ phy_dev->advertising = phy->advertising;
+ return phy_start_aneg(phy_dev);
+}
+
static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
{
struct net_device *ndev = phy->dev;
struct emac_instance *dev = netdev_priv(ndev);
- dev->phy.autoneg = AUTONEG_ENABLE;
- dev->phy.speed = SPEED_1000;
- dev->phy.duplex = DUPLEX_FULL;
- dev->phy.advertising = advertise;
phy->autoneg = AUTONEG_ENABLE;
- phy->speed = dev->phy.speed;
- phy->duplex = dev->phy.duplex;
phy->advertising = advertise;
- return phy_start_aneg(dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
}
static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
@@ -2481,13 +2503,10 @@
struct net_device *ndev = phy->dev;
struct emac_instance *dev = netdev_priv(ndev);
- dev->phy.autoneg = AUTONEG_DISABLE;
- dev->phy.speed = speed;
- dev->phy.duplex = fd;
phy->autoneg = AUTONEG_DISABLE;
phy->speed = speed;
phy->duplex = fd;
- return phy_start_aneg(dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
}
static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2509,16 +2528,17 @@
{
struct net_device *ndev = phy->dev;
struct emac_instance *dev = netdev_priv(ndev);
+ struct phy_device *phy_dev = dev->phy_dev;
int res;
- res = phy_read_status(dev->phy_dev);
+ res = phy_read_status(phy_dev);
if (res)
return res;
- dev->phy.speed = phy->speed;
- dev->phy.duplex = phy->duplex;
- dev->phy.pause = phy->pause;
- dev->phy.asym_pause = phy->asym_pause;
+ phy->speed = phy_dev->speed;
+ phy->duplex = phy_dev->duplex;
+ phy->pause = phy_dev->pause;
+ phy->asym_pause = phy_dev->asym_pause;
return 0;
}
@@ -2528,13 +2548,6 @@
struct emac_instance *dev = netdev_priv(ndev);
phy_start(dev->phy_dev);
- dev->phy.autoneg = phy->autoneg;
- dev->phy.speed = phy->speed;
- dev->phy.duplex = phy->duplex;
- dev->phy.advertising = phy->advertising;
- dev->phy.pause = phy->pause;
- dev->phy.asym_pause = phy->asym_pause;
-
return phy_init_hw(dev->phy_dev);
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f2d329..c0fbeb3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -81,7 +81,7 @@
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
-MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
+MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
@@ -1468,6 +1468,11 @@
}
#endif
+static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_open = ibmvnic_open,
.ndo_stop = ibmvnic_close,
@@ -1479,6 +1484,7 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
+ .ndo_change_mtu = ibmvnic_change_mtu,
};
/* ethtool functions */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cdde3cc2..44d9610 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,7 @@
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7a8eb48..894c8e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -224,7 +224,7 @@
I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
- I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
};
@@ -4092,7 +4092,7 @@
/* Only allow ATR evict on hardware that is capable of handling it */
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
- pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED;
if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
u16 sw_flags = 0, valid_flags = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d5c9c9e..a7a4b28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -295,7 +295,7 @@
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+ if (!test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -3611,7 +3611,7 @@
* this is not a performance path and napi_schedule()
* can deal with rescheduling.
*/
- if (!test_bit(__I40E_VSI_DOWN, pf->state))
+ if (!test_bit(__I40E_DOWN, pf->state))
napi_schedule_irqoff(&q_vector->napi);
}
@@ -3687,7 +3687,7 @@
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf, false);
}
@@ -6203,7 +6203,7 @@
{
/* if interface is down do nothing */
- if (test_bit(__I40E_VSI_DOWN, pf->state))
+ if (test_bit(__I40E_DOWN, pf->state))
return;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+ if (test_bit(__I40E_DOWN, pf->state) ||
test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
@@ -6399,9 +6399,9 @@
reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
- reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
- clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
+ if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_DOWN_REQUESTED);
+ clear_bit(__I40E_DOWN_REQUESTED, pf->state);
}
/* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@
/* If we're already down or resetting, just bail */
if (reset_flags &&
- !test_bit(__I40E_VSI_DOWN, pf->state) &&
+ !test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, pf->state)) {
rtnl_lock();
i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@
u32 val;
int v;
- if (test_bit(__I40E_VSI_DOWN, pf->state))
+ if (test_bit(__I40E_DOWN, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -8821,11 +8821,12 @@
(pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
- } else {
- pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
+ /* Enable HW ATR eviction if possible */
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
+
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
@@ -9767,7 +9768,7 @@
return -ENODEV;
}
if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_VSI_DOWN, pf->state)) {
+ !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
@@ -11003,7 +11004,7 @@
}
pf->next_vsi = 0;
pf->pdev = pdev;
- set_bit(__I40E_VSI_DOWN, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
hw = &pf->hw;
hw->back = pf;
@@ -11293,7 +11294,7 @@
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
*/
- clear_bit(__I40E_VSI_DOWN, pf->state);
+ clear_bit(__I40E_DOWN, pf->state);
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11449,7 @@
/* Unwind what we've done if something failed in the setup */
err_vsis:
- set_bit(__I40E_VSI_DOWN, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
err_switch_setup:
@@ -11500,7 +11501,7 @@
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
- set_bit(__I40E_VSI_DOWN, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
if (pf->service_timer.data)
del_timer_sync(&pf->service_timer);
if (pf->service_task.func)
@@ -11740,7 +11741,7 @@
struct i40e_hw *hw = &pf->hw;
set_bit(__I40E_SUSPENDED, pf->state);
- set_bit(__I40E_VSI_DOWN, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
rtnl_lock();
i40e_prep_for_reset(pf, true);
rtnl_unlock();
@@ -11789,7 +11790,7 @@
int retval = 0;
set_bit(__I40E_SUSPENDED, pf->state);
- set_bit(__I40E_VSI_DOWN, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11842,7 @@
/* handling the reset will rebuild the device state */
if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
- clear_bit(__I40E_VSI_DOWN, pf->state);
+ clear_bit(__I40E_DOWN, pf->state);
rtnl_lock();
i40e_reset_and_rebuild(pf, false, true);
rtnl_unlock();
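The i40e hunks above, besides renaming __I40E_VSI_DOWN to __I40E_DOWN, split the ATR-eviction flag into a "capable" bit reported by firmware and an "enabled" bit that ethtool and the data path actually toggle. A rough standalone illustration of that capable-vs-enabled split (flag names and values are invented for this sketch, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)              (1ULL << (n))
#define FLAG_ATR_EVICT_CAPABLE  BIT_ULL(0)   /* hardware supports the feature */
#define FLAG_ATR_EVICT_ENABLED  BIT_ULL(1)   /* feature currently switched on */

int main(void)
{
	uint64_t flags = FLAG_ATR_EVICT_CAPABLE;   /* capability reported once at probe */

	/* enable the feature only when the capability is present */
	if (flags & FLAG_ATR_EVICT_CAPABLE)
		flags |= FLAG_ATR_EVICT_ENABLED;

	/* an ethtool-style private flag clears only the enable bit,
	 * never the capability bit
	 */
	flags &= ~FLAG_ATR_EVICT_ENABLED;

	printf("capable=%d enabled=%d\n",
	       !!(flags & FLAG_ATR_EVICT_CAPABLE),
	       !!(flags & FLAG_ATR_EVICT_ENABLED));
	return 0;
}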
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 29321a6..77115c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1854,7 +1854,8 @@
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
struct sk_buff *skb;
@@ -2340,7 +2341,7 @@
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
return;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2402,7 +2403,7 @@
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
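The truesize change in the Rx path above rounds both the payload (plus headroom) and the shared-info footer up to the cache-line size before adding them. A minimal userspace sketch of that arithmetic; the 64-byte cache line and the 320/192-byte sizes are placeholder assumptions, not the driver's real constants:

#include <stdio.h>

#define SMP_CACHE_BYTES      64
#define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)    ALIGN(x, SMP_CACHE_BYTES)

int main(void)
{
	unsigned int shared_info = 320;  /* stand-in for sizeof(struct skb_shared_info) */
	unsigned int pad = 192;          /* stand-in for I40E_SKB_PAD */
	unsigned int size = 1500;        /* received frame length */

	unsigned int truesize = SKB_DATA_ALIGN(shared_info) +
				SKB_DATA_ALIGN(pad + size);

	printf("truesize = %u bytes\n", truesize);   /* 320 + 1728 = 2048 */
	return 0;
}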
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 95c23fb..0fb38ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3017,10 +3017,12 @@
VLAN_VID_MASK));
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
i40e_vsi_remove_pvid(vsi);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index dfe241a..12b02e5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1190,7 +1190,8 @@
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
struct sk_buff *skb;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 9b875d7..33c9016 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3719,7 +3719,7 @@
dma_addr_t *dma_addr,
phys_addr_t *phys_addr)
{
- int cpu = smp_processor_id();
+ int cpu = get_cpu();
*dma_addr = mvpp2_percpu_read(priv, cpu,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
@@ -3740,6 +3740,8 @@
if (sizeof(phys_addr_t) == 8)
*phys_addr |= (u64)phys_addr_highbits << 32;
}
+
+ put_cpu();
}
/* Free all buffers from the pool */
@@ -3920,18 +3922,12 @@
return bm;
}
-/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
-{
- return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
-}
-
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
phys_addr_t buf_phys_addr)
{
- int cpu = smp_processor_id();
+ int cpu = get_cpu();
if (port->priv->hw_version == MVPP22) {
u32 val = 0;
@@ -3958,15 +3954,15 @@
MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
mvpp2_percpu_write(port->priv, cpu,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+
+ put_cpu();
}
/* Refill BM pool */
-static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
+static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
dma_addr_t dma_addr,
phys_addr_t phys_addr)
{
- int pool = mvpp2_bm_cookie_pool_get(bm);
-
mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
@@ -4186,8 +4182,6 @@
{
u32 val;
- return;
-
/* Only GOP port 0 has an XLG MAC */
if (port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4515,21 +4509,6 @@
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
-/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- int cpu = smp_processor_id();
- int pool;
-
- pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
- MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
-
- return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
- ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
-}
-
/* Tx descriptors helper methods */
/* Get pointer to next Tx descriptor to be processed (send) by HW */
@@ -4757,7 +4736,7 @@
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
- int cpu = smp_processor_id();
+ int cpu = get_cpu();
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
@@ -4765,6 +4744,8 @@
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
rxq->pkts_coal);
+
+ put_cpu();
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4945,7 +4926,7 @@
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- cpu = smp_processor_id();
+ cpu = get_cpu();
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
if (port->priv->hw_version == MVPP21)
rxq_dma = rxq->descs_dma;
@@ -4954,6 +4935,7 @@
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+ put_cpu();
/* Set Offset */
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4980,9 +4962,13 @@
for (i = 0; i < rx_received; i++) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ int pool;
- mvpp2_pool_refill(port, bm,
+ pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
+
+ mvpp2_pool_refill(port, pool,
mvpp2_rxdesc_dma_addr_get(port, rx_desc),
mvpp2_rxdesc_cookie_get(port, rx_desc));
}
@@ -5012,10 +4998,11 @@
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- cpu = smp_processor_id();
+ cpu = get_cpu();
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ put_cpu();
}
/* Create and initialize a Tx queue */
@@ -5038,7 +5025,7 @@
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- cpu = smp_processor_id();
+ cpu = get_cpu();
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
txq->descs_dma);
@@ -5063,6 +5050,7 @@
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+ put_cpu();
/* WRR / EJP configuration - indirect access */
tx_port_num = mvpp2_egress_port(port);
@@ -5133,10 +5121,11 @@
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- cpu = smp_processor_id();
+ cpu = get_cpu();
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ put_cpu();
}
/* Cleanup Tx ports */
@@ -5146,7 +5135,7 @@
int delay, pending, cpu;
u32 val;
- cpu = smp_processor_id();
+ cpu = get_cpu();
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
val |= MVPP2_TXQ_DRAIN_EN_MASK;
@@ -5173,6 +5162,7 @@
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+ put_cpu();
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -5420,7 +5410,7 @@
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool, u32 bm)
+ struct mvpp2_bm_pool *bm_pool, int pool)
{
dma_addr_t dma_addr;
phys_addr_t phys_addr;
@@ -5432,7 +5422,7 @@
if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
+ mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
return 0;
}
@@ -5490,7 +5480,7 @@
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 bm, rx_status;
+ u32 rx_status;
int pool, rx_bytes, err;
void *data;
@@ -5502,8 +5492,8 @@
phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
data = (void *)phys_to_virt(phys_addr);
- bm = mvpp2_bm_cookie_build(port, rx_desc);
- pool = mvpp2_bm_cookie_pool_get(bm);
+ pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
bm_pool = &port->priv->bm_pools[pool];
/* In case of an error, release the requested buffer pointer
@@ -5516,7 +5506,7 @@
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
+ mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
continue;
}
@@ -5531,7 +5521,7 @@
goto err_drop_frame;
}
- err = mvpp2_rx_refill(port, bm_pool, bm);
+ err = mvpp2_rx_refill(port, bm_pool, pool);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
goto err_drop_frame;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae5fdc2..ffbcb27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1562,11 +1562,6 @@
qpn = priv->drop_qp.qpn;
else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
- if (qpn < priv->rss_map.base_qpn ||
- qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
- en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
- return -EINVAL;
- }
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b6..0710b36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include <linux/export.h>
#include "mlx4.h"
@@ -985,16 +986,21 @@
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ if (!mlx4_qp_lookup(dev, rule->qpn)) {
+ mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+ ret = -EINVAL;
+ goto out;
+ }
+
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
list_for_each_entry(cur, &rule->list, list) {
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
- if (ret < 0) {
- mlx4_free_cmd_mailbox(dev, mailbox);
- return ret;
- }
+ if (ret < 0)
+ goto out;
+
size += ret;
}
@@ -1021,6 +1027,7 @@
}
}
+out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2d6abd4..5a310d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -384,6 +384,19 @@
__mlx4_qp_free_icm(dev, qpn);
}
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+ spin_unlock(&qp_table->lock);
+ return qp;
+}
+
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@
}
if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+ mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
cmd->qp_context.qos_vport = params->qos_vport;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 0751654..8127838 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5255,6 +5255,13 @@
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+ struct mlx4_vf_immed_vlan_work *work)
+{
+ ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+ ctx->qp_context.qos_vport = work->qos_vport;
+}
+
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
- upd_context->qp_mask |=
- cpu_to_be64(1ULL <<
- MLX4_UPD_QP_MASK_QOS_VPP);
- upd_context->qp_context.qos_vport =
- work->qos_vport;
+
+ if (dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_QOS_VPP)
+ update_qos_vpp(upd_context, work);
}
err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2fd044b..944fc17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -458,13 +458,15 @@
struct mlx5e_rx_am_stats {
int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
int epms; /* events per msec */
};
struct mlx5e_rx_am_sample {
- ktime_t time;
- unsigned int pkt_ctr;
- u16 event_ctr;
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
};
struct mlx5e_rx_am { /* Adaptive Moderation */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 02dd3a9..acf32fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -183,28 +183,27 @@
mlx5e_am_step(am);
}
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+ (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
struct mlx5e_rx_am_stats *prev)
{
- int diff;
-
- if (!prev->ppms)
- return curr->ppms ? MLX5E_AM_STATS_BETTER :
+ if (!prev->bpms)
+ return curr->bpms ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_SAME;
- diff = curr->ppms - prev->ppms;
- if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
- return (diff > 0) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
- if (!prev->epms)
- return curr->epms ? MLX5E_AM_STATS_WORSE :
- MLX5E_AM_STATS_SAME;
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
- diff = curr->epms - prev->epms;
- if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
- return (diff < 0) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
return MLX5E_AM_STATS_SAME;
}
@@ -266,10 +265,13 @@
{
s->time = ktime_get();
s->pkt_ctr = rq->stats.packets;
+ s->byte_ctr = rq->stats.bytes;
s->event_ctr = rq->cq.event_ctr;
}
#define MLX5E_AM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@
{
/* u32 holds up to 71 minutes, should be enough */
u32 delta_us = ktime_us_delta(end->time, start->time);
- unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
if (!delta_us)
return;
- curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
- curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
}
void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@
switch (am->state) {
case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+ nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
+ am->start_sample.event_ctr);
if (nevents < MLX5E_AM_NEVENTS)
break;
mlx5e_am_sample(rq, &end_sample);
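The adaptive-moderation hunks above lean on two small helpers: BIT_GAP(), which computes a counter delta that survives wraparound of a fixed-width counter, and IS_SIGNIFICANT_DIFF(), which treats anything beyond a 10% change as meaningful. A standalone sketch of both, with macro shapes mirroring the driver and sample values made up:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define BITS_PER_BYTE        8
#define BITS_PER_TYPE(type)  (sizeof(type) * BITS_PER_BYTE)
#define BIT_ULL(n)           (1ULL << (n))
#define BIT_GAP(bits, end, start) \
	((((uint64_t)(end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

#define IS_SIGNIFICANT_DIFF(val, ref) \
	(((100 * llabs((long long)(val) - (ref))) / (ref)) > 10)

int main(void)
{
	/* a u16 event counter that wrapped from 65530 to 30 */
	uint16_t start = 65530, end = 30;

	printf("events since start: %llu\n",           /* prints 36, not a huge negative delta */
	       (unsigned long long)BIT_GAP(BITS_PER_TYPE(uint16_t), end, start));

	/* 1200 bytes/ms vs 1000 bytes/ms is more than a 10% change */
	printf("significant: %d\n", IS_SIGNIFICANT_DIFF(1200, 1000));
	return 0;
}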
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53e4992..f81c3aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -417,20 +417,13 @@
};
static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_plug", 0 },
{ "module_unplug", 8 },
};
static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_pwr_budget_exd", 0 }, /* power budget exceed */
- { "module_long_range", 8 }, /* long range for non MLNX cable */
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_no_eeprom", 24 }, /* no eeprom/retry time out */
- { "module_enforce_part", 32 }, /* enforce part number list */
- { "module_unknown_id", 40 }, /* unknown identifier */
- { "module_high_temp", 48 }, /* high temperature */
+ { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
+ { "module_high_temp", 48 }, /* high temperature */
{ "module_bad_shorted", 56 }, /* bad or shorted cable/module */
- { "module_unknown_status", 64 },
};
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0e487e8..8f5125c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -862,7 +862,7 @@
ft_attr.level = level;
ft_attr.prio = prio;
- return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0);
+ return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}
struct mlx5_flow_table*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 44f59b1..f27f84f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -275,10 +275,8 @@
struct mlx5_core_health *health = &dev->priv.health;
u32 count;
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- mod_timer(&health->timer, get_next_poll_jiffies());
- return;
- }
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ goto out;
count = ioread32be(health->health_counter);
if (count == health->prev)
@@ -290,8 +288,6 @@
if (health->miss_counter == MAX_MISSES) {
dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
- } else {
- mod_timer(&health->timer, get_next_poll_jiffies());
}
if (in_fatal(dev) && !health->sick) {
@@ -305,6 +301,9 @@
"new health works are not permitted at this stage\n");
spin_unlock(&health->wq_lock);
}
+
+out:
+ mod_timer(&health->timer, get_next_poll_jiffies());
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index fe5546b..4f577a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -537,8 +537,10 @@
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- /* If the HCA supports 4K UARs use it */
- if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+ /* Enable 4K UAR only when HCA supports it and page size is bigger
+ * than 4K.
+ */
+ if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
@@ -621,10 +623,9 @@
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
-#ifdef CONFIG_SMP
- if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, priv->irq_info[i].mask))
mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-#endif
return 0;
}
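The IS_ENABLED() change above keeps the affinity-hint call visible to the compiler even on !SMP builds, so the branch is still type-checked and then optimized away instead of being hidden behind #ifdef. A simplified stand-in (the real kernel macro also copes with undefined options; the helper below is invented for the sketch):

#include <stdio.h>

#define CONFIG_SMP 1                 /* flip to 0 to "disable" the option */
#define IS_ENABLED(option) (option)  /* simplified stand-in for the kernel macro */

static int set_affinity_hint(int irq)
{
	printf("setting affinity hint for irq %d\n", irq);
	return 0;
}

int main(void)
{
	int irq = 42;

	/* both operands stay compiled; the call is elided when CONFIG_SMP is 0 */
	if (IS_ENABLED(CONFIG_SMP) && set_affinity_hint(irq))
		fprintf(stderr, "irq_set_affinity_hint failed, irq 0x%.4x\n",
			(unsigned int)irq);

	return 0;
}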
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 483241b..a672f6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2956,7 +2956,7 @@
qed_wr(p_hwfn,
p_ptt,
s_storm_defs[storm_id].cm_ctx_wr_addr,
- BIT(9) | lid);
+ (i << 9) | lid);
*(dump_buf + offset) = qed_rd(p_hwfn,
p_ptt,
rd_reg_addr);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 537d123..715b3aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1730,7 +1730,8 @@
qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
break;
default:
- DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+ DP_VERBOSE(cdev, QED_MSG_SP,
+ "Invalid protocol type = %d\n", type);
return;
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7245b10..8131292 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1824,22 +1824,44 @@
u32 (*get_cap_size)(void *, int);
void (*set_sys_info)(void *, int, u32);
void (*store_cap_mask)(void *, u32);
+ bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
+ bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
-static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
{
return adapter->ahw->extra_capability[0] &
QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
}
-static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
{
return adapter->ahw->extra_capability[0] &
QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
}
+static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return false;
+}
+
+static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return false;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->encap_rx_offload(adapter);
+}
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->encap_tx_offload(adapter);
+}
+
static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
return adapter->nic_ops->start_firmware(adapter);
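The qlcnic change above replaces a single inline helper that only understood 83xx capabilities with per-generation callbacks in the hw_ops table, so 82xx simply reports "not supported". A minimal userspace sketch of that ops-table dispatch; the types and names below are local to the sample, not the driver's:

#include <stdio.h>
#include <stdbool.h>

struct adapter;

struct hw_ops {
	bool (*encap_rx_offload)(struct adapter *adapter);
	bool (*encap_tx_offload)(struct adapter *adapter);
};

struct adapter {
	const struct hw_ops *ops;
	unsigned int extra_capability;
};

#define CAP_ENCAP_RX 0x1
#define CAP_ENCAP_TX 0x2

/* 83xx-style: consult a firmware capability word */
static bool hw83xx_encap_rx(struct adapter *a) { return a->extra_capability & CAP_ENCAP_RX; }
static bool hw83xx_encap_tx(struct adapter *a) { return a->extra_capability & CAP_ENCAP_TX; }

/* 82xx-style: the hardware never supports encapsulation offload */
static bool hw82xx_encap_rx(struct adapter *a) { (void)a; return false; }
static bool hw82xx_encap_tx(struct adapter *a) { (void)a; return false; }

static const struct hw_ops hw83xx_ops = { hw83xx_encap_rx, hw83xx_encap_tx };
static const struct hw_ops hw82xx_ops = { hw82xx_encap_rx, hw82xx_encap_tx };

/* generic wrappers used by the rest of the driver */
static bool encap_rx_offload(struct adapter *a) { return a->ops->encap_rx_offload(a); }
static bool encap_tx_offload(struct adapter *a) { return a->ops->encap_tx_offload(a); }

int main(void)
{
	struct adapter new_hw = { &hw83xx_ops, CAP_ENCAP_RX | CAP_ENCAP_TX };
	struct adapter old_hw = { &hw82xx_ops, 0 };

	printf("83xx rx/tx: %d/%d\n", encap_rx_offload(&new_hw), encap_tx_offload(&new_hw));
	printf("82xx rx/tx: %d/%d\n", encap_rx_offload(&old_hw), encap_tx_offload(&old_hw));
	return 0;
}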
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4fb6879..f7080d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -242,6 +242,8 @@
.get_cap_size = qlcnic_83xx_get_cap_size,
.set_sys_info = qlcnic_83xx_set_sys_info,
.store_cap_mask = qlcnic_83xx_store_cap_mask,
+ .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
+ .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 838cc0c..7848cf0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@
}
return -EIO;
}
- usleep_range(1000, 1500);
+ udelay(1200);
}
if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b6628aa..1b5f7d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -632,6 +632,8 @@
.get_cap_size = qlcnic_82xx_get_cap_size,
.set_sys_info = qlcnic_82xx_set_sys_info,
.store_cap_mask = qlcnic_82xx_store_cap_mask,
+ .encap_rx_offload = qlcnic_82xx_encap_rx_offload,
+ .encap_tx_offload = qlcnic_82xx_encap_tx_offload,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 2f656f3..c58180f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -77,6 +77,8 @@
.free_mac_list = qlcnic_sriov_vf_free_mac_list,
.enable_sds_intr = qlcnic_83xx_enable_sds_intr,
.disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
+ .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
};
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index cc065ff..bcd4708 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -931,7 +931,7 @@
emac_mac_config(adpt);
emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
- adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+ adpt->phydev->irq = PHY_POLL;
ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 441c1936..18461fc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -13,15 +13,11 @@
/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
*/
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include <linux/acpi.h>
#include "emac.h"
-#include "emac-mac.h"
/* EMAC base register offsets */
#define EMAC_MDIO_CTRL 0x001414
@@ -52,62 +48,10 @@
#define MDIO_WAIT_TIMES 1000
-#define EMAC_LINK_SPEED_DEFAULT (\
- EMAC_LINK_SPEED_10_HALF |\
- EMAC_LINK_SPEED_10_FULL |\
- EMAC_LINK_SPEED_100_HALF |\
- EMAC_LINK_SPEED_100_FULL |\
- EMAC_LINK_SPEED_1GB_FULL)
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The autopoll feature takes over the MDIO bus. In order for
- * the PHY driver to be able to talk to the PHY over the MDIO
- * bus, we need to temporarily disable the autopoll feature.
- */
-static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
-{
- u32 val;
-
- /* disable autopoll */
- emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
-
- /* wait for any mdio polling to complete */
- if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
- !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
- return 0;
-
- /* failed to disable; ensure it is enabled before returning */
- emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-
- return -EBUSY;
-}
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The EMAC has the ability to poll the external PHY on the MDIO
- * bus for link state changes. This eliminates the need for the
- * driver to poll the phy. If if the link state does change,
- * the EMAC issues an interrupt on behalf of the PHY.
- */
-static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
-{
- emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-}
-
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
struct emac_adapter *adpt = bus->priv;
u32 reg;
- int ret;
-
- ret = emac_phy_mdio_autopoll_disable(adpt);
- if (ret)
- return ret;
emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
(addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
!(reg & (MDIO_START | MDIO_BUSY)),
100, MDIO_WAIT_TIMES * 100))
- ret = -EIO;
- else
- ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+ return -EIO;
- emac_phy_mdio_autopoll_enable(adpt);
-
- return ret;
+ return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
}
static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
struct emac_adapter *adpt = bus->priv;
u32 reg;
- int ret;
-
- ret = emac_phy_mdio_autopoll_disable(adpt);
- if (ret)
- return ret;
emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
(addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
!(reg & (MDIO_START | MDIO_BUSY)), 100,
MDIO_WAIT_TIMES * 100))
- ret = -EIO;
+ return -EIO;
- emac_phy_mdio_autopoll_enable(adpt);
-
- return ret;
+ return 0;
}
/* Configure the MDIO bus and connect the external PHY */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 28a8cdc..98a326f 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -50,19 +50,7 @@
#define DMAR_DLY_CNT_DEF 15
#define DMAW_DLY_CNT_DEF 4
-#define IMR_NORMAL_MASK (\
- ISR_ERROR |\
- ISR_GPHY_LINK |\
- ISR_TX_PKT |\
- GPHY_WAKEUP_INT)
-
-#define IMR_EXTENDED_MASK (\
- SW_MAN_INT |\
- ISR_OVER |\
- ISR_ERROR |\
- ISR_GPHY_LINK |\
- ISR_TX_PKT |\
- GPHY_WAKEUP_INT)
+#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
#define ISR_TX_PKT (\
TX_PKT_INT |\
@@ -70,10 +58,6 @@
TX_PKT_INT2 |\
TX_PKT_INT3)
-#define ISR_GPHY_LINK (\
- GPHY_LINK_UP_INT |\
- GPHY_LINK_DOWN_INT)
-
#define ISR_OVER (\
RFD0_UR_INT |\
RFD1_UR_INT |\
@@ -187,10 +171,6 @@
if (status & ISR_OVER)
net_warn_ratelimited("warning: TX/RX overflow\n");
- /* link event */
- if (status & ISR_GPHY_LINK)
- phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
-
exit:
/* enable the interrupt */
writel(irq->mask, adpt->base + EMAC_INT_MASK);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3cd7989..784782d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,18 +230,6 @@
int ring_size;
int i;
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
- }
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
-
- /* Free aligned TX buffers */
- kfree(priv->tx_align[q]);
- priv->tx_align[q] = NULL;
-
if (priv->rx_ring[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@
priv->tx_ring[q] = NULL;
}
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
+
/* Free TX skb ringbuffer.
* SKBs are freed by ravb_tx_free() call above.
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 489ef14..6a9c954 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -37,6 +37,7 @@
#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
#define TSE_PCS_CONTROL_REG 0x00
#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
+#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
#define TSE_PCS_IF_MODE_REG 0x28
#define TSE_PCS_LINK_TIMER_0_REG 0x24
#define TSE_PCS_LINK_TIMER_1_REG 0x26
@@ -65,6 +66,7 @@
#define TSE_PCS_SW_RESET_TIMEOUT 100
#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
#define TSE_PCS_USE_SGMII_ENA BIT(0)
+#define TSE_PCS_IF_USE_SGMII 0x03
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
@@ -101,7 +103,9 @@
{
int ret = 0;
- writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+ writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
+
+ writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index aa64764..e0ef02f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -214,13 +214,13 @@
{
/* Context type from W/B descriptor must be zero */
if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
- return -EINVAL;
+ return 0;
/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
- return 0;
+ return 1;
- return 1;
+ return 0;
}
static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
@@ -282,7 +282,10 @@
}
}
exit:
- return ret;
+ if (likely(ret == 0))
+ return 1;
+
+ return 0;
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a74c481..d16d11b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -434,14 +434,14 @@
return;
/* check tx tstamp status */
- if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+ if (priv->hw->desc->get_tx_timestamp_status(p)) {
/* get the valid tstamp */
ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
- netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
/* pass tstamp to stack */
skb_tstamp_tx(skb, &shhwtstamp);
}
@@ -468,19 +468,19 @@
return;
/* Check if timestamp is available */
- if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+ if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
/* For GMAC4, the valid timestamp is from CTX next desc. */
if (priv->plat->has_gmac4)
ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
else
ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp->hwtstamp = ns_to_ktime(ns);
} else {
- netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+ netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
}
}
@@ -546,7 +546,10 @@
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* take time stamp for all event messages */
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->plat->has_gmac4)
+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+ else
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -578,7 +581,10 @@
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->plat->has_gmac4)
+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+ else
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -612,7 +618,10 @@
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->plat->has_gmac4)
+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+ else
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -1208,7 +1217,7 @@
u32 rx_count = priv->plat->rx_queues_to_use;
unsigned int bfsize = 0;
int ret = -ENOMEM;
- u32 queue;
+ int queue;
int i;
if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2733,7 @@
priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
0, 1,
- (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2947,7 +2956,8 @@
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int entry, first_entry;
+ int entry;
+ unsigned int first_entry;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
unsigned int enh_desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 48fb72f..f4b31d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -59,7 +59,8 @@
/* Enable Snapshot for Messages Relevant to Master */
#define PTP_TCR_TSMSTRENA BIT(15)
/* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
+#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
+#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
/* Enable MAC address for PTP Frame Filtering */
#define PTP_TCR_TSENMACADDR BIT(18)
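The SNAPTYPSEL fix above hinges on the difference between BIT(16), a single bit, and GENMASK(17, 16), a two-bit field. A small standalone check of that distinction; the macros follow the usual kernel shape but are reimplemented here so the sample builds on its own:

#include <stdio.h>

#define BIT(n)         (1UL << (n))
#define GENMASK(h, l)  (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

int main(void)
{
	printf("BIT(16)        = 0x%05lx\n", BIT(16));          /* 0x10000 */
	printf("GENMASK(17,16) = 0x%05lx\n", GENMASK(17, 16));  /* 0x30000 */
	return 0;
}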
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 959fd12..199459b 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1007,7 +1007,7 @@
dev->netdev_ops = &geneve_netdev_ops;
dev->ethtool_ops = &geneve_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &geneve_type);
@@ -1133,7 +1133,7 @@
/* make enough headroom for basic scenario */
encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
- if (ip_tunnel_info_af(info) == AF_INET) {
+ if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
encap_len += sizeof(struct iphdr);
dev->max_mtu -= sizeof(struct iphdr);
} else {
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7b652bb..ca110cd 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -611,7 +611,7 @@
static void gtp_link_setup(struct net_device *dev)
{
dev->netdev_ops = &gtp_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->hard_header_len = 0;
dev->addr_len = 0;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 922bf44..021a8ec 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -311,7 +311,7 @@
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f62e7f3..78a6414 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -476,7 +476,7 @@
static void bpq_setup(struct net_device *dev)
{
dev->netdev_ops = &bpq_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8c3633c..97e3bc6 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (s->par.bitrate <= 0)
+ return -EINVAL;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 262b2ea..6066f1b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -171,6 +171,8 @@
spinlock_t request_lock;
struct list_head req_list;
+ struct work_struct mcast_work;
+
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
u16 ind_table[ITAB_NUM];
@@ -201,6 +203,7 @@
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *info);
+void rndis_filter_update(struct netvsc_device *nvdev);
void rndis_filter_device_remove(struct hv_device *dev,
struct netvsc_device *nvdev);
int rndis_filter_set_rss_param(struct rndis_device *rdev,
@@ -211,7 +214,6 @@
struct vmbus_channel *channel,
void *data, u32 buflen);
-int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -696,7 +698,6 @@
/* list protection */
spinlock_t lock;
- struct work_struct work;
u32 msg_enable; /* debug level */
u32 tx_checksum_mask;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4421a6d..82d6c02 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -56,37 +56,12 @@
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static void do_set_multicast(struct work_struct *w)
-{
- struct net_device_context *ndevctx =
- container_of(w, struct net_device_context, work);
- struct hv_device *device_obj = ndevctx->device_ctx;
- struct net_device *ndev = hv_get_drvdata(device_obj);
- struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
- struct rndis_device *rdev;
-
- if (!nvdev)
- return;
-
- rdev = nvdev->extension;
- if (rdev == NULL)
- return;
-
- if (ndev->flags & IFF_PROMISC)
- rndis_filter_set_packet_filter(rdev,
- NDIS_PACKET_TYPE_PROMISCUOUS);
- else
- rndis_filter_set_packet_filter(rdev,
- NDIS_PACKET_TYPE_BROADCAST |
- NDIS_PACKET_TYPE_ALL_MULTICAST |
- NDIS_PACKET_TYPE_DIRECTED);
-}
-
static void netvsc_set_multicast_list(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
- schedule_work(&net_device_ctx->work);
+ rndis_filter_update(nvdev);
}
static int netvsc_open(struct net_device *net)
@@ -123,8 +98,6 @@
netif_tx_disable(net);
- /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
- cancel_work_sync(&net_device_ctx->work);
ret = rndis_filter_close(nvdev);
if (ret != 0) {
netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -1028,7 +1001,7 @@
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev)
return -ENODEV;
@@ -1158,11 +1131,22 @@
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netvsc_poll_controller(struct net_device *net)
+static void netvsc_poll_controller(struct net_device *dev)
{
- /* As netvsc_start_xmit() works synchronous we don't have to
- * trigger anything here.
- */
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev;
+ int i;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(ndc->nvdev);
+ if (ndev) {
+ for (i = 0; i < ndev->num_chn; i++) {
+ struct netvsc_channel *nvchan = &ndev->chan_table[i];
+
+ napi_schedule(&nvchan->napi);
+ }
+ }
+ rcu_read_unlock();
}
#endif
@@ -1552,7 +1536,6 @@
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
- INIT_WORK(&net_device_ctx->work, do_set_multicast);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1622,7 +1605,6 @@
netif_device_detach(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
- cancel_work_sync(&ndev_ctx->work);
/*
* Call to the vsc driver to let it know that the device is being
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f9d5b0b..cb79cd0 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,6 +31,7 @@
#include "hyperv_net.h"
+static void rndis_set_multicast(struct work_struct *w);
#define RNDIS_EXT_LEN PAGE_SIZE
struct rndis_request {
@@ -76,6 +77,7 @@
spin_lock_init(&device->request_lock);
INIT_LIST_HEAD(&device->req_list);
+ INIT_WORK(&device->mcast_work, rndis_set_multicast);
device->state = RNDIS_DEV_UNINITIALIZED;
@@ -815,7 +817,8 @@
return ret;
}
-int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
+static int rndis_filter_set_packet_filter(struct rndis_device *dev,
+ u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
@@ -846,6 +849,28 @@
return ret;
}
+static void rndis_set_multicast(struct work_struct *w)
+{
+ struct rndis_device *rdev
+ = container_of(w, struct rndis_device, mcast_work);
+
+ if (rdev->ndev->flags & IFF_PROMISC)
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_PROMISCUOUS);
+ else
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+}
+
+void rndis_filter_update(struct netvsc_device *nvdev)
+{
+ struct rndis_device *rdev = nvdev->extension;
+
+ schedule_work(&rdev->mcast_work);
+}
+
static int rndis_filter_init_device(struct rndis_device *dev)
{
struct rndis_request *request;
@@ -973,6 +998,9 @@
if (dev->state != RNDIS_DEV_DATAINITIALIZED)
return 0;
+ /* Make sure rndis_set_multicast doesn't re-enable filter! */
+ cancel_work_sync(&dev->mcast_work);
+
ret = rndis_filter_set_packet_filter(dev, 0);
if (ret == -ENODEV)
ret = 0;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 312fce7..144ea5a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -207,7 +207,6 @@
__skb_queue_purge(&txp->tq);
}
kfree(dp->tx_private);
- free_netdev(dev);
}
static void ifb_setup(struct net_device *dev)
@@ -230,7 +229,8 @@
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
eth_hw_addr_random(dev);
- dev->destructor = ifb_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ifb_dev_free;
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 618ed88..7c7680c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -632,7 +632,7 @@
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
dev->netdev_ops = &ipvlan_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->header_ops = &ipvlan_header_ops;
dev->ethtool_ops = &ipvlan_ethtool_ops;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 224f65c..3061249 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -159,7 +159,6 @@
{
dev_net(dev)->loopback_dev = NULL;
free_percpu(dev->lstats);
- free_netdev(dev);
}
static const struct net_device_ops loopback_ops = {
@@ -196,7 +195,8 @@
dev->ethtool_ops = &loopback_ethtool_ops;
dev->header_ops = &eth_header_ops;
dev->netdev_ops = &loopback_ops;
- dev->destructor = loopback_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = loopback_dev_free;
}
/* Setup and register the loopback device. */
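The netdev teardown hunks in this series (loopback, ifb, macsec, team, tun, slip, ...) all follow the same split: the driver's priv_destructor releases only driver-private resources, and the separate needs_free_netdev flag tells the core to free the device itself afterwards. A userspace analogue of that two-stage teardown; every name here is invented for the sketch:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_netdev {
	void (*priv_destructor)(struct fake_netdev *dev);
	bool needs_free_netdev;
	void *priv_stats;       /* stand-in for per-cpu stats and similar */
};

/* driver callback: release private state only, never the device itself */
static void loopback_like_dev_free(struct fake_netdev *dev)
{
	free(dev->priv_stats);
	dev->priv_stats = NULL;
}

/* core teardown path */
static void core_free_netdev(struct fake_netdev *dev)
{
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	if (dev->needs_free_netdev)
		free(dev);
}

int main(void)
{
	struct fake_netdev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;

	dev->priv_stats = malloc(128);
	dev->priv_destructor = loopback_like_dev_free;
	dev->needs_free_netdev = true;

	core_free_netdev(dev);
	puts("device torn down");
	return 0;
}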
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cdc347b..7941167 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2996,7 +2996,6 @@
free_percpu(macsec->secy.tx_sc.stats);
dev_put(real_dev);
- free_netdev(dev);
}
static void macsec_setup(struct net_device *dev)
@@ -3006,7 +3005,8 @@
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &macsec_netdev_ops;
- dev->destructor = macsec_free_netdev;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = macsec_free_netdev;
SET_NETDEV_DEVTYPE(dev, &macsec_type);
eth_zero_addr(dev->broadcast);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 346ad2f..67bf7eb 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1092,7 +1092,7 @@
netif_keep_dst(dev);
dev->priv_flags |= IFF_UNICAST_FLT;
dev->netdev_ops = &macvlan_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->header_ops = &macvlan_hard_header_ops;
dev->ethtool_ops = &macvlan_ethtool_ops;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 06ee639..0e27920 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -358,7 +358,7 @@
if (err)
goto out_unlock;
- pr_info("netconsole: network logging started\n");
+ pr_info("network logging started\n");
} else { /* false */
/* We need to disable the netconsole before cleaning it up
* otherwise we might end up in write_msg() with
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index b916038..c4b3362 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -113,7 +113,7 @@
dev->netdev_ops = &nlmon_ops;
dev->ethtool_ops = &nlmon_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c360dd6..3ab6c58 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -127,6 +127,7 @@
tristate "ThunderX SOCs MDIO buses"
depends on 64BIT
depends on PCI
+ depends on !(MDIO_DEVICE=y && PHYLIB=m)
select MDIO_CAVIUM
help
This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9097e42..57297ba 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1127,8 +1127,6 @@
if (adv < 0)
return adv;
- lpa &= adv;
-
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8e73f5f..f99c21f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -658,6 +658,18 @@
return 0;
}
+static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int rc;
+
+ /* Some devices have extra OF data and an OF-style MODALIAS */
+ rc = of_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int mdio_bus_suspend(struct device *dev)
{
@@ -708,6 +720,7 @@
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
+ .uevent = mdio_uevent,
.pm = MDIO_BUS_PM_OPS,
};
EXPORT_SYMBOL(mdio_bus_type);
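The new mdio_uevent() above follows a common fallback pattern: try the OF-specific handler first and fall through only when it reports -ENODEV ("not my device"), so genuine failures still propagate. A standalone sketch of that pattern; the stub below stands in for of_device_uevent_modalias() and is not the real helper:

#include <stdio.h>
#include <errno.h>

static int of_modalias_stub(int has_of_node)
{
	if (!has_of_node)
		return -ENODEV;      /* no OF data: let the caller carry on */
	printf("MODALIAS=of:stub\n");
	return 0;
}

static int mdio_uevent_like(int has_of_node)
{
	int rc = of_modalias_stub(has_of_node);

	if (rc != -ENODEV)
		return rc;           /* success, or a real error to propagate */

	return 0;                    /* missing OF data is not an error here */
}

int main(void)
{
	printf("with OF node:    %d\n", mdio_uevent_like(1));
	printf("without OF node: %d\n", mdio_uevent_like(0));
	return 0;
}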
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6a5fd18..b9252b8d8 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,11 +268,31 @@
return ret;
}
+/* Some config bits need to be set again on resume, handle them here. */
+static int kszphy_config_reset(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ if (priv->rmii_ref_clk_sel) {
+ ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
+ if (ret) {
+ phydev_err(phydev,
+ "failed to set rmii reference clock\n");
+ return ret;
+ }
+ }
+
+ if (priv->led_mode >= 0)
+ kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
+
+ return 0;
+}
+
static int kszphy_config_init(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
const struct kszphy_type *type;
- int ret;
if (!priv)
return 0;
@@ -285,19 +305,7 @@
if (type->has_nand_tree_disable)
kszphy_nand_tree_disable(phydev);
- if (priv->rmii_ref_clk_sel) {
- ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
- if (ret) {
- phydev_err(phydev,
- "failed to set rmii reference clock\n");
- return ret;
- }
- }
-
- if (priv->led_mode >= 0)
- kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
-
- return 0;
+ return kszphy_config_reset(phydev);
}
static int ksz8041_config_init(struct phy_device *phydev)
@@ -700,8 +708,14 @@
static int kszphy_resume(struct phy_device *phydev)
{
+ int ret;
+
genphy_resume(phydev);
+ ret = kszphy_config_reset(phydev);
+ if (ret)
+ return ret;
+
/* Enable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_ENABLED;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 82ab8fb..eebb0e1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -54,6 +54,8 @@
return "5Gbps";
case SPEED_10000:
return "10Gbps";
+ case SPEED_14000:
+ return "14Gbps";
case SPEED_20000:
return "20Gbps";
case SPEED_25000:
@@ -241,7 +243,7 @@
* phy_lookup_setting - lookup a PHY setting
* @speed: speed to match
* @duplex: duplex to match
- * @feature: allowed link modes
+ * @features: allowed link modes
* @exact: an exact match is required
*
* Search the settings array for a setting that matches the speed and
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 1da31dc..74b9072 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -629,7 +629,7 @@
static void sl_free_netdev(struct net_device *dev)
{
int i = dev->base_addr;
- free_netdev(dev);
+
slip_devs[i] = NULL;
}
@@ -651,7 +651,8 @@
static void sl_setup(struct net_device *dev)
{
dev->netdev_ops = &sl_netdev_ops;
- dev->destructor = sl_free_netdev;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = sl_free_netdev;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -1369,8 +1370,6 @@
if (sl->tty) {
printk(KERN_ERR "%s: tty discipline still running\n",
dev->name);
- /* Intentionally leak the control block. */
- dev->destructor = NULL;
}
unregister_netdev(dev);
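
The slip change above, and the team, tun, veth, vrf and wireless conversions further down, all replace dev->destructor with the needs_free_netdev / priv_destructor pair. A minimal sketch of the resulting pattern, using hypothetical foo_* names rather than code from any of these drivers:

static void foo_destructor(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical private struct */

	free_percpu(priv->stats);			/* driver-private cleanup only */
	/* no free_netdev(dev) here any more; the core does it */
}

static void foo_setup(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;		/* hypothetical ops table */
	dev->needs_free_netdev = true;			/* core calls free_netdev() */
	dev->priv_destructor = foo_destructor;		/* runs before the free */
}
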
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6c5d5ef..fba8c13 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1643,7 +1643,6 @@
struct team *team = netdev_priv(dev);
free_percpu(team->pcpu_stats);
- free_netdev(dev);
}
static int team_open(struct net_device *dev)
@@ -2079,7 +2078,8 @@
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
- dev->destructor = team_destructor;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = team_destructor;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_TEAM;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bbd707b..9ee7d42 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1560,7 +1560,6 @@
free_percpu(tun->pcpu_stats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
- free_netdev(dev);
}
static void tun_setup(struct net_device *dev)
@@ -1571,7 +1570,8 @@
tun->group = INVALID_GID;
dev->ethtool_ops = &tun_ethtool_ops;
- dev->destructor = tun_free_netdev;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = tun_free_netdev;
/* We prefer our own queue length */
dev->tx_queue_len = TUN_READQ_SIZE;
}
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index eb52de8..c7a350b 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -298,7 +298,7 @@
dev->addr_len = 1;
dev->tx_queue_len = 3;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
/*
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8f923a1..32a22f4e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,7 +123,7 @@
dev->addr_len = 0;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &qmimux_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
@@ -1192,6 +1192,8 @@
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
+ {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
{QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
@@ -1206,6 +1208,8 @@
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ddc62cb..1a419a4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4368,6 +4368,8 @@
break;
}
+ dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
+
return version;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 38f0f03..0156fe8 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -222,7 +222,6 @@
static void veth_dev_free(struct net_device *dev)
{
free_percpu(dev->vstats);
- free_netdev(dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -317,7 +316,8 @@
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
- dev->destructor = veth_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = veth_dev_free;
dev->max_mtu = ETH_MAX_MTU;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e9246c..a871f45 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -869,7 +869,7 @@
unsigned int len;
len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
- rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+ rq->min_buf_len, PAGE_SIZE - hdr_len);
return ALIGN(len, L1_CACHE_BYTES);
}
@@ -2144,7 +2144,8 @@
unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
- return max(min_buf_len, hdr_len);
+ return max(max(min_buf_len, hdr_len) - hdr_len,
+ (unsigned int)GOOD_PACKET_LEN);
}
static int virtnet_find_vqs(struct virtnet_info *vi)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index db88249..022c0b5 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -36,12 +36,14 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
+#include <net/netns/generic.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
-static bool add_fib_rules = true;
+
+static unsigned int vrf_net_id;
struct net_vrf {
struct rtable __rcu *rth;
@@ -1348,7 +1350,7 @@
dev->netdev_ops = &vrf_netdev_ops;
dev->l3mdev_ops = &vrf_l3mdev_ops;
dev->ethtool_ops = &vrf_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
/* Fill in device structure with ethernet-generic values. */
eth_hw_addr_random(dev);
@@ -1394,6 +1396,8 @@
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
+ bool *add_fib_rules;
+ struct net *net;
int err;
if (!data || !data[IFLA_VRF_TABLE])
@@ -1409,13 +1413,15 @@
if (err)
goto out;
- if (add_fib_rules) {
+ net = dev_net(dev);
+ add_fib_rules = net_generic(net, vrf_net_id);
+ if (*add_fib_rules) {
err = vrf_add_fib_rules(dev);
if (err) {
unregister_netdevice(dev);
goto out;
}
- add_fib_rules = false;
+ *add_fib_rules = false;
}
out:
@@ -1498,16 +1504,38 @@
.notifier_call = vrf_device_event,
};
+/* Initialize per network namespace state */
+static int __net_init vrf_netns_init(struct net *net)
+{
+ bool *add_fib_rules = net_generic(net, vrf_net_id);
+
+ *add_fib_rules = true;
+
+ return 0;
+}
+
+static struct pernet_operations vrf_net_ops __net_initdata = {
+ .init = vrf_netns_init,
+ .id = &vrf_net_id,
+ .size = sizeof(bool),
+};
+
static int __init vrf_init_module(void)
{
int rc;
register_netdevice_notifier(&vrf_notifier_block);
- rc = rtnl_link_register(&vrf_link_ops);
+ rc = register_pernet_subsys(&vrf_net_ops);
if (rc < 0)
goto error;
+ rc = rtnl_link_register(&vrf_link_ops);
+ if (rc < 0) {
+ unregister_pernet_subsys(&vrf_net_ops);
+ goto error;
+ }
+
return 0;
error:
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 7f0136f..c28bdce 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -135,7 +135,7 @@
dev->netdev_ops = &vsockmon_ops;
dev->ethtool_ops = &vsockmon_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 328b471..5fa798a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -59,6 +59,8 @@
static int vxlan_sock_add(struct vxlan_dev *vxlan);
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -740,6 +742,22 @@
call_rcu(&f->rcu, vxlan_fdb_free);
}
+static void vxlan_dst_free(struct rcu_head *head)
+{
+ struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+ dst_cache_destroy(&rd->dst_cache);
+ kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+ struct vxlan_rdst *rd)
+{
+ list_del_rcu(&rd->list);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+ call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
__be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@
* otherwise destroy the fdb entry
*/
if (rd && !list_is_singular(&f->remotes)) {
- list_del_rcu(&rd->list);
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
- kfree_rcu(rd, rcu);
+ vxlan_fdb_dst_destroy(vxlan, f, rd);
goto out;
}
@@ -1067,6 +1083,8 @@
rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net();
+ vxlan_vs_del_dev(vxlan);
+
if (__vxlan_sock_release_prep(sock4)) {
udp_tunnel_sock_release(sock4->sock);
kfree(sock4);
@@ -2342,6 +2360,15 @@
mod_timer(&vxlan->age_timer, next_timer);
}
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+ spin_lock(&vn->sock_lock);
+ hlist_del_init_rcu(&vxlan->hlist);
+ spin_unlock(&vn->sock_lock);
+}
+
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -2584,7 +2611,7 @@
eth_hw_addr_random(dev);
ether_setup(dev);
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->features |= NETIF_F_LLTX;
@@ -3286,15 +3313,9 @@
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
vxlan_flush(vxlan, true);
- spin_lock(&vn->sock_lock);
- if (!hlist_unhashed(&vxlan->hlist))
- hlist_del_rcu(&vxlan->hlist);
- spin_unlock(&vn->sock_lock);
-
gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 65ee2a6..a0d76f7 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -475,7 +475,7 @@
dev->flags = 0;
dev->header_ops = &dlci_header_ops;
dev->netdev_ops = &dlci_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dlp->receive = dlci_receive;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index eb91528..78596e4 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1106,7 +1106,7 @@
return -EIO;
}
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
*get_dev_p(pvc, type) = dev;
if (!used) {
state(hdlc)->dce_changed = 1;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 9df9ed6..63f7490 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -306,7 +306,7 @@
static void lapbeth_setup(struct net_device *dev)
{
dev->netdev_ops = &lapbeth_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
dev->hard_header_len = 3;
dev->mtu = 1000;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 91ee542..b90c77e 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1287,7 +1287,7 @@
struct ath6kl *ar = ath6kl_priv(dev);
dev->netdev_ops = &ath6kl_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index d5e993d..517a315 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1271,6 +1271,8 @@
qcom_smem_state_put(wcn->tx_enable_state);
qcom_smem_state_put(wcn->tx_rings_empty_state);
+ rpmsg_destroy_ept(wcn->smd_channel);
+
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index cd1d673..617199c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5225,7 +5225,6 @@
if (vif)
brcmf_free_vif(vif);
- free_netdev(ndev);
}
static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index a3d8236..511d190 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -624,7 +624,8 @@
if (!ndev)
return ERR_PTR(-ENOMEM);
- ndev->destructor = brcmf_cfg80211_free_netdev;
+ ndev->needs_free_netdev = true;
+ ndev->priv_destructor = brcmf_cfg80211_free_netdev;
ifp = netdev_priv(ndev);
ifp->ndev = ndev;
/* store mapping ifidx to bsscfgidx */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fc64b89..e034500 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3422,7 +3422,7 @@
/* otherwise, set txglomalign */
value = sdiodev->settings->bus.sdio.sd_sgentry_align;
/* SDIO ADMA requires at least 32 bit alignment */
- value = max_t(u32, value, 4);
+ value = max_t(u32, value, ALIGNMENT);
err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
sizeof(u32));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 3b3e076..45e2efc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 17
#define IWL7265_UCODE_API_MIN 17
-#define IWL7265D_UCODE_API_MIN 17
-#define IWL3168_UCODE_API_MIN 20
+#define IWL7265D_UCODE_API_MIN 22
+#define IWL3168_UCODE_API_MIN 22
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b9718c0..8913771 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
#define IWL8265_UCODE_API_MAX 30
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 17
-#define IWL8265_UCODE_API_MIN 20
+#define IWL8000_UCODE_API_MIN 22
+#define IWL8265_UCODE_API_MIN 22
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 306bc96..77efbb7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -370,6 +370,7 @@
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
#define DBGC_IN_SAMPLE (0xa03c00)
+#define DBGC_OUT_CTRL (0xa03c0c)
/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index 1b7d265..a10c6aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -307,6 +307,11 @@
/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
#define LQ_FLAG_COLOR_POS 1
#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
+ LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+ LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
/* Bit 4-5: Tx RTS BW Signalling
* (0) No RTS BW signalling
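
The new macros maintain a 3-bit wrapping counter in bits 1-3 of the LQ command flags. A short sketch of the read-increment-write round trip, operating on a plain flags byte for illustration (not code from the patch):

static u8 lq_color_bump_example(u8 flags)
{
	u32 color;

	/* read the 3-bit color from bits 1-3, increment it (shifted and masked) */
	color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(flags));

	/* write it back; starting from 0 it runs 1, 2, ..., 7, then wraps to 0 */
	return LQ_FLAG_COLOR_SET(flags, color);
}
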
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 81b9891..1360ebf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -519,8 +519,11 @@
* bit-7 invalid rate indication
*/
#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
#define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+ TX_RES_RATE_TABLE_COLOR_POS)
#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 7b86a4f..c8712e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -1002,14 +1002,6 @@
return 0;
}
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
- iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
- else
- iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
{
u8 *ptr;
@@ -1023,10 +1015,8 @@
/* EARLY START - firmware's configuration is hard coded */
if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
!mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
- conf_id == FW_DBG_START_FROM_ALIVE) {
- iwl_mvm_restart_early_start(mvm);
+ conf_id == FW_DBG_START_FROM_ALIVE)
return 0;
- }
if (!mvm->fw->dbg_conf_tlv[conf_id])
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 0f1831b..fd2fc46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1040,7 +1040,7 @@
struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
struct iwl_mac_beacon_cmd_v7 beacon_cmd;
} u = {};
- struct iwl_mac_beacon_cmd beacon_cmd;
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
struct ieee80211_tx_info *info;
u32 beacon_skb_len;
u32 rate, tx_flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4e74a6b..52f8d7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1730,8 +1730,11 @@
*/
static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
{
+ u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+ IWL_MVM_CMD_QUEUE;
+
return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
- ~BIT(IWL_MVM_CMD_QUEUE));
+ ~BIT(cmd_queue));
}
static inline
@@ -1753,6 +1756,7 @@
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
+ mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_trans_stop_device(mvm->trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 9ffff6e..3da5ec4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1149,22 +1149,38 @@
mutex_lock(&mvm->mutex);
- /* stop recording */
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /* stop recording */
iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+ iwl_mvm_fw_error_dump(mvm);
+
+ /* start recording again if the firmware is not crashed */
+ if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+ mvm->fw->dbg_dest_tlv)
+ iwl_clear_bits_prph(mvm->trans,
+ MON_BUFF_SAMPLE_CTL, 0x100);
} else {
+ u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+ u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+ /* stop recording */
iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
- /* wait before we collect the data till the DBGC stop */
udelay(100);
+ iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+ /* wait until the DBGC has stopped before we collect the data */
+ udelay(500);
+
+ iwl_mvm_fw_error_dump(mvm);
+
+ /* start recording again if the firmware is not crashed */
+ if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+ mvm->fw->dbg_dest_tlv) {
+ iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+ iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+ }
}
- iwl_mvm_fw_error_dump(mvm);
-
- /* start recording again if the firmware is not crashed */
- WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
- mvm->fw->dbg_dest_tlv &&
- iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
-
mutex_unlock(&mvm->mutex);
iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7788eef..aa785cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,7 +2,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@
rs_get_lower_rate_in_column(lq_sta, rate);
}
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
- struct rs_rate *b,
- bool allow_ant_mismatch)
-
-{
- bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
- (a->bfer == b->bfer);
-
- if (allow_ant_mismatch) {
- if (a->stbc || a->bfer) {
- WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
- a->stbc, a->bfer, a->ant);
- ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
- } else if (b->stbc || b->bfer) {
- WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
- b->stbc, b->bfer, b->ant);
- ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
- }
- }
-
- return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
- (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
/* Check if both rates share the same column */
static inline bool rs_rate_column_match(struct rs_rate *a,
struct rs_rate *b)
@@ -1182,12 +1154,12 @@
u32 lq_hwrate;
struct rs_rate lq_rate, tx_resp_rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+ u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+ u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+ u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
- bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_LQ_SS_PARAMS);
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1262,10 +1234,10 @@
rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
/* Here we actually compare this rate to the latest LQ command */
- if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
IWL_DEBUG_RATE(mvm,
- "initial tx resp rate 0x%x does not match 0x%x\n",
- tx_resp_hwrate, lq_hwrate);
+ "tx resp color 0x%x does not match 0x%x\n",
+ lq_color, LQ_FLAG_COLOR_GET(table->flags));
/*
* Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@
u8 valid_tx_ant = 0;
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
bool toggle_ant = false;
+ u32 color;
memcpy(&rate, initial_rate, sizeof(rate));
@@ -3380,6 +3353,9 @@
num_rates, num_retries, valid_tx_ant,
toggle_ant);
+ /* update the color of the LQ command (as a counter at bits 1-3) */
+ color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+ lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
}
struct rs_bfer_active_iter_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ee207f2..3abde1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -2,6 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@
} pers;
};
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
+ * Note: this is the iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+ RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+ (((uintptr_t)_p) |\
+ ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool init);
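
A short illustration of the status_driver_data[0] packing documented above, with arbitrary values (not code from the patch):

static void rs_drv_data_pack_example(void)
{
	u8 lq_color = 5, reduced_txp = 0x12;
	u32 tlc_info;

	/* pack: bits 8-10 take the color, bits 0-7 keep the reduced txp */
	tlc_info = (uintptr_t)RS_DRV_DATA_PACK(lq_color, reduced_txp);	/* 0x512 */

	/* unpack, as rs.c does on the tx-status path */
	reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;			/* 0x12 again */
	lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);			/* 5 again */
}
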
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f5c786d..614d678 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2120,7 +2120,8 @@
if (!iwl_mvm_is_dqa_supported(mvm))
return 0;
- if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
return -ENOTSUPP;
/*
@@ -2155,6 +2156,16 @@
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE)) {
+ /*
+ * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+ * invalid, so make sure we use the queue we want.
+ * Note that this is done here as we want to avoid making DQA
+ * changes in the mac80211 layer.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC) {
+ vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ mvmvif->cab_queue = vif->cab_queue;
+ }
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
}
@@ -3321,18 +3332,15 @@
/* Get the station from the mvm local station table */
mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
- if (!mvm_sta) {
- IWL_ERR(mvm, "Failed to find station\n");
- return -EINVAL;
- }
- sta_id = mvm_sta->sta_id;
+ if (mvm_sta)
+ sta_id = mvm_sta->sta_id;
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 2716cb5..ad62b67 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -313,6 +313,7 @@
* This is basically (last acked packet++).
* @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
* @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
* @state: state of the BA agreement establishment / tear down.
* @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@
u16 next_reclaimed;
/* The rest is Tx AGG related */
u32 rate_n_flags;
+ u8 lq_color;
bool amsdu_in_ampdu_allowed;
enum iwl_mvm_agg_state state;
u16 txq_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f9cbd19..506d581 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
int ret;
- if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
- return -EIO;
-
mutex_lock(&mvm->mutex);
+ if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
ret = -EINVAL;
goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bcaceb64..f21901c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1323,6 +1323,7 @@
struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs;
u8 skb_freed = 0;
+ u8 lq_color;
u16 next_reclaimed, seq_ctl;
bool is_ndp = false;
@@ -1405,8 +1406,9 @@
info->status.tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
info->status.status_driver_data[0] =
- (void *)(uintptr_t)tx_resp->reduced_tpc;
+ RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
ieee80211_tx_status(mvm->hw, skb);
}
@@ -1638,6 +1640,9 @@
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
+ mvmsta->tid_data[tid].lq_color =
+ (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+ TX_RES_RATE_TABLE_COLOR_POS;
}
rcu_read_unlock();
@@ -1707,6 +1712,11 @@
iwl_mvm_check_ratid_empty(mvm, sta, tid);
freed = 0;
+
+ /* pack the lq color from tid_data along with the reduced txp */
+ ba_info->status.status_driver_data[0] =
+ RS_DRV_DATA_PACK(tid_data->lq_color,
+ ba_info->status.status_driver_data[0]);
ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 70acf85..93cbc7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2803,7 +2803,8 @@
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
- if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+ (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
return iwl_pci_fw_enter_d0i3(trans);
return 0;
@@ -2811,7 +2812,8 @@
static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
- if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+ (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9fb46a6f..9c9bfbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -906,7 +906,7 @@
if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
ret = -EINVAL;
- goto error;
+ goto error_free_resp;
}
rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@
if (qid > ARRAY_SIZE(trans_pcie->txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO;
- goto error;
+ goto error_free_resp;
}
if (test_and_set_bit(qid, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d already used", qid);
ret = -EIO;
- goto error;
+ goto error_free_resp;
}
txq->id = qid;
@@ -934,8 +934,11 @@
(txq->write_ptr) | (qid << 16));
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+ iwl_free_resp(&hcmd);
return qid;
+error_free_resp:
+ iwl_free_resp(&hcmd);
error:
iwl_pcie_gen2_txq_free_memory(trans, txq);
return ret;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 544fc09..1372b20 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -73,7 +73,7 @@
dev->mem_end = mdev->mem_end;
hostap_setup_dev(dev, local, type);
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
sprintf(dev->name, "%s%s", prefix, name);
if (!rtnl_locked)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 002b25c..c854a55 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2861,7 +2861,7 @@
static void hwsim_mon_setup(struct net_device *dev)
{
dev->netdev_ops = &hwsim_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
ether_setup(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index dd87b9f..39b6b5e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1280,7 +1280,7 @@
struct net_device *dev)
{
dev->netdev_ops = &mwifiex_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
/* Initialize private structure */
priv->current_key_index = 0;
priv->media_connected = false;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a609264..903d581 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -56,7 +56,7 @@
static int nvme_char_major;
module_param(nvme_char_major, int, 0);
-static unsigned long default_ps_max_latency_us = 25000;
+static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
"max power saving latency for new devices; use PM QOS to change per device");
@@ -1342,7 +1342,7 @@
* transitioning between power states. Therefore, when running
* in any given state, we will enter the next lower-power
* non-operational state after waiting 50 * (enlat + exlat)
- * microseconds, as long as that state's total latency is under
+ * microseconds, as long as that state's exit latency is under
* the requested maximum latency.
*
* We will not autonomously enter any non-operational state for
@@ -1387,7 +1387,7 @@
* lowest-power state, not the number of states.
*/
for (state = (int)ctrl->npss; state >= 0; state--) {
- u64 total_latency_us, transition_ms;
+ u64 total_latency_us, exit_latency_us, transition_ms;
if (target)
table->entries[state] = target;
@@ -1408,12 +1408,15 @@
NVME_PS_FLAGS_NON_OP_STATE))
continue;
- total_latency_us =
- (u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
- + le32_to_cpu(ctrl->psd[state].exit_lat);
- if (total_latency_us > ctrl->ps_max_latency_us)
+ exit_latency_us =
+ (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+ if (exit_latency_us > ctrl->ps_max_latency_us)
continue;
+ total_latency_us =
+ exit_latency_us +
+ le32_to_cpu(ctrl->psd[state].entry_lat);
+
/*
* This state is good. Use it as the APST idle
* target for higher power states.
@@ -2438,6 +2441,10 @@
struct nvme_ns *ns;
mutex_lock(&ctrl->namespaces_mutex);
+
+ /* Forcibly start all queues to avoid having stuck requests */
+ blk_mq_start_hw_queues(ctrl->admin_q);
+
list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
* Revalidating a dead namespace sets capacity to 0. This will
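
A rough worked example of the latency comparison described in the changed comment above, with invented latencies: a state with 4 ms entry and 10 ms exit latency now qualifies because only its exit latency is compared against the (now 100 ms default) limit, while the idle timer still uses both:

static u64 apst_idle_timeout_us_example(void)
{
	u64 entry_lat_us = 4000, exit_lat_us = 10000;	/* invented state latencies */
	u64 ps_max_latency_us = 100000;			/* new module default */

	if (exit_lat_us > ps_max_latency_us)
		return 0;	/* state skipped: only exit latency is checked now */

	/* idle timer still follows the comment: 50 * (enlat + exlat) us */
	return 50 * (entry_lat_us + exit_lat_us);	/* 700000 us = 700 ms */
}
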
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5b14cbe..92964ce 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1139,6 +1139,7 @@
/* *********************** NVME Ctrl Routines **************************** */
static void __nvme_fc_final_op_cleanup(struct request *rq);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static int
nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1265,7 +1266,7 @@
struct nvme_command *sqe = &op->cmd_iu.sqe;
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
union nvme_result result;
- bool complete_rq;
+ bool complete_rq, terminate_assoc = true;
/*
* WARNING:
@@ -1294,6 +1295,14 @@
* fabricate a CQE, the following fields will not be set as they
* are not referenced:
* cqe.sqid, cqe.sqhd, cqe.command_id
+ *
+ * Failure or error of an individual i/o, detected by the
+ * transport in a fashion unrelated to the nvme completion
+ * status, can cause the initiator and target sides to get
+ * out of sync on SQ head/tail (aka the outstanding io count
+ * allowed).
+ * Per FC-NVME spec, failure of an individual command requires
+ * the connection to be terminated, which in turn requires the
+ * association to be terminated.
*/
fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1359,6 +1368,8 @@
goto done;
}
+ terminate_assoc = false;
+
done:
if (op->flags & FCOP_FLAGS_AEN) {
nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1366,7 +1377,7 @@
atomic_set(&op->state, FCPOP_STATE_IDLE);
op->flags = FCOP_FLAGS_AEN; /* clear other flags */
nvme_fc_ctrl_put(ctrl);
- return;
+ goto check_error;
}
complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1379,6 +1390,10 @@
nvme_end_request(rq, status, result);
} else
__nvme_fc_final_op_cleanup(rq);
+
+check_error:
+ if (terminate_assoc)
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
}
static int
@@ -2791,6 +2806,7 @@
ctrl->ctrl.opts = NULL;
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
/* as we're past the point where we transition to the ref
* counting teardown path, if we return a bad pointer here,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d52701df..951042a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1367,7 +1367,7 @@
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
/* If there is a reset ongoing, we shouldn't reset again. */
- if (work_busy(&dev->reset_work))
+ if (dev->ctrl.state == NVME_CTRL_RESETTING)
return false;
/* We shouldn't reset unless the controller is on fatal error state
@@ -1903,7 +1903,7 @@
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
- if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+ if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
/*
@@ -1913,9 +1913,6 @@
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
- goto out;
-
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -2009,8 +2006,8 @@
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
- if (work_busy(&dev->reset_work))
- return -ENODEV;
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
return 0;
@@ -2136,6 +2133,7 @@
if (result)
goto release_pools;
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2177,7 @@
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&dev->reset_work);
pci_set_drvdata(pdev, NULL);
if (!pci_device_is_present(pdev)) {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 28bd255..24397d3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -753,28 +753,26 @@
if (ret)
goto requeue;
- blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
-
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
if (ret)
- goto stop_admin_q;
+ goto requeue;
set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (ret)
- goto stop_admin_q;
+ goto requeue;
nvme_start_keep_alive(&ctrl->ctrl);
if (ctrl->queue_count > 1) {
ret = nvme_rdma_init_io_queues(ctrl);
if (ret)
- goto stop_admin_q;
+ goto requeue;
ret = nvme_rdma_connect_io_queues(ctrl);
if (ret)
- goto stop_admin_q;
+ goto requeue;
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@
ctrl->ctrl.opts->nr_reconnects = 0;
if (ctrl->queue_count > 1) {
- nvme_start_queues(&ctrl->ctrl);
nvme_queue_scan(&ctrl->ctrl);
nvme_queue_async_events(&ctrl->ctrl);
}
@@ -791,8 +788,6 @@
return;
-stop_admin_q:
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
+ /*
+ * queues are no longer live, so restart them so that new I/O
+ * fails fast
+ */
+ blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+ nvme_start_queues(&ctrl->ctrl);
+
nvme_rdma_reconnect_or_remove(ctrl);
}
@@ -1433,7 +1435,7 @@
/*
* We cannot accept any other command until the Connect command has completed.
*/
-static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
struct request *rq)
{
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
@@ -1441,11 +1443,22 @@
if (!blk_rq_is_passthrough(rq) ||
cmd->common.opcode != nvme_fabrics_command ||
- cmd->fabrics.fctype != nvme_fabrics_type_connect)
- return false;
+ cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+ /*
+ * A reconnecting state means transport disruption, which can
+ * take a long time and might even fail permanently, so we can't
+ * let incoming I/O be requeued forever. Fail it fast to give
+ * the upper layers a chance to fail over.
+ */
+ if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+ return -EIO;
+ else
+ return -EAGAIN;
+ }
}
- return true;
+ return 0;
}
static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1463,8 +1476,9 @@
WARN_ON_ONCE(rq->tag < 0);
- if (!nvme_rdma_queue_is_ready(queue, rq))
- return BLK_MQ_RQ_QUEUE_BUSY;
+ ret = nvme_rdma_queue_is_ready(queue, rq);
+ if (unlikely(ret))
+ goto err;
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 9416d05..28c38c7 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -144,8 +144,8 @@
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
- if (IS_ERR(iommu))
- return PTR_ERR(iommu);
+ if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 74cf5ff..c80e37a 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -896,7 +896,7 @@
{
if (pci_dev_is_disconnected(dev)) {
*val = ~0;
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
@@ -906,7 +906,7 @@
{
if (pci_dev_is_disconnected(dev)) {
*val = ~0;
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
@@ -917,7 +917,7 @@
{
if (pci_dev_is_disconnected(dev)) {
*val = ~0;
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
@@ -926,7 +926,7 @@
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
if (pci_dev_is_disconnected(dev))
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);
@@ -934,7 +934,7 @@
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
if (pci_dev_is_disconnected(dev))
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);
@@ -943,7 +943,7 @@
u32 val)
{
if (pci_dev_is_disconnected(dev))
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
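
Since the accessors above now return PCIBIOS_* codes for disconnected devices, a caller that wants an errno can translate the result; a minimal sketch assuming the standard pcibios_err_to_errno() mapping:

static int foo_read_vendor(struct pci_dev *pdev, u16 *vendor)
{
	int rc = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);

	/* PCIBIOS_DEVICE_NOT_FOUND maps back to -ENODEV for errno callers */
	return rc ? pcibios_err_to_errno(rc) : 0;
}
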
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 175edad..2942066 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -5,6 +5,7 @@
config PCI_EPF_TEST
tristate "PCI Endpoint Test driver"
depends on PCI_ENDPOINT
+ select CRC32
help
Enable this configuration option to enable the test driver
for PCI Endpoint.
diff --git a/drivers/phy/phy-qcom-qmp.c b/drivers/phy/phy-qcom-qmp.c
index 727e23b..78ca628 100644
--- a/drivers/phy/phy-qcom-qmp.c
+++ b/drivers/phy/phy-qcom-qmp.c
@@ -844,7 +844,7 @@
int num = qmp->cfg->num_vregs;
int i;
- qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL);
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
if (!qmp->vregs)
return -ENOMEM;
@@ -983,16 +983,16 @@
* Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
*/
qphy->tx = of_iomap(np, 0);
- if (IS_ERR(qphy->tx))
- return PTR_ERR(qphy->tx);
+ if (!qphy->tx)
+ return -ENOMEM;
qphy->rx = of_iomap(np, 1);
- if (IS_ERR(qphy->rx))
- return PTR_ERR(qphy->rx);
+ if (!qphy->rx)
+ return -ENOMEM;
qphy->pcs = of_iomap(np, 2);
- if (IS_ERR(qphy->pcs))
- return PTR_ERR(qphy->pcs);
+ if (!qphy->pcs)
+ return -ENOMEM;
/*
* Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2de1e60..5f36721 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -704,7 +704,7 @@
/* Reallocate the array */
u32 new_capacity = 2 * dev->pipes_capacity;
struct goldfish_pipe **pipes =
- kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL);
+ kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
if (!pipes)
return -ENOMEM;
memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index ef29f18..4cc2f4e 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -97,11 +97,9 @@
} \
}
-#ifdef CONFIG_PM_SLEEP
static u8 suspend_prep_ok;
static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
-#endif
struct telemetry_susp_stats {
u32 shlw_swake_ctr;
@@ -807,7 +805,6 @@
.release = single_release,
};
-#ifdef CONFIG_PM_SLEEP
static int pm_suspend_prep_cb(void)
{
struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
@@ -937,7 +934,6 @@
static struct notifier_block pm_notifier = {
.notifier_call = pm_notification,
};
-#endif /* CONFIG_PM_SLEEP */
static int __init telemetry_debugfs_init(void)
{
@@ -960,14 +956,13 @@
if (err < 0)
return -EINVAL;
-
-#ifdef CONFIG_PM_SLEEP
register_pm_notifier(&pm_notifier);
-#endif /* CONFIG_PM_SLEEP */
debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
- if (!debugfs_conf->telemetry_dbg_dir)
- return -ENOMEM;
+ if (!debugfs_conf->telemetry_dbg_dir) {
+ err = -ENOMEM;
+ goto out_pm;
+ }
f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir, NULL,
@@ -1014,6 +1009,8 @@
out:
debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
debugfs_conf->telemetry_dbg_dir = NULL;
+out_pm:
+ unregister_pm_notifier(&pm_notifier);
return err;
}
@@ -1022,6 +1019,7 @@
{
debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
debugfs_conf->telemetry_dbg_dir = NULL;
+ unregister_pm_notifier(&pm_notifier);
}
late_initcall(telemetry_debugfs_init);
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 35ce53e..d5e5229 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -155,3 +155,5 @@
}
postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index e72abbc..a66a317 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -70,14 +70,14 @@
{
return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
-MDEV_TYPE_ATTR_RO(name);
+static MDEV_TYPE_ATTR_RO(name);
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
-MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(device_api);
static ssize_t available_instances_show(struct kobject *kobj,
struct device *dev, char *buf)
@@ -86,7 +86,7 @@
return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
-MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(available_instances);
static struct attribute *mdev_types_attrs[] = {
&mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@
.attrs = mdev_types_attrs,
};
-struct attribute_group *mdev_type_groups[] = {
+static struct attribute_group *mdev_type_groups[] = {
&mdev_type_group,
NULL,
};
@@ -152,7 +152,7 @@
&events, &private->nb);
}
-void vfio_ccw_mdev_release(struct mdev_device *mdev)
+static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@
}
}
-int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
+static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
if (info->index != VFIO_CCW_IO_IRQ_INDEX)
return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9be4596..ea09991 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -668,10 +668,28 @@
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
int rc;
+ /* Add queue/card to list of active queues/cards */
+ spin_lock_bh(&ap_list_lock);
+ if (is_card_dev(dev))
+ list_add(&to_ap_card(dev)->list, &ap_card_list);
+ else
+ list_add(&to_ap_queue(dev)->list,
+ &to_ap_queue(dev)->card->queues);
+ spin_unlock_bh(&ap_list_lock);
+
ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
- if (rc)
+
+ if (rc) {
+ spin_lock_bh(&ap_list_lock);
+ if (is_card_dev(dev))
+ list_del_init(&to_ap_card(dev)->list);
+ else
+ list_del_init(&to_ap_queue(dev)->list);
+ spin_unlock_bh(&ap_list_lock);
ap_dev->drv = NULL;
+ }
+
return rc;
}
@@ -680,14 +698,17 @@
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
+ if (ap_drv->remove)
+ ap_drv->remove(ap_dev);
+
+ /* Remove queue/card from list of active queues/cards */
spin_lock_bh(&ap_list_lock);
if (is_card_dev(dev))
list_del_init(&to_ap_card(dev)->list);
else
list_del_init(&to_ap_queue(dev)->list);
spin_unlock_bh(&ap_list_lock);
- if (ap_drv->remove)
- ap_drv->remove(ap_dev);
+
return 0;
}
@@ -1056,10 +1077,6 @@
}
/* get it and thus adjust reference counter */
get_device(&ac->ap_dev.device);
- /* Add card device to card list */
- spin_lock_bh(&ap_list_lock);
- list_add(&ac->list, &ap_card_list);
- spin_unlock_bh(&ap_list_lock);
}
/* now create the new queue device */
aq = ap_queue_create(qid, type);
@@ -1070,10 +1087,6 @@
aq->ap_dev.device.parent = &ac->ap_dev.device;
dev_set_name(&aq->ap_dev.device,
"%02x.%04x", id, dom);
- /* Add queue device to card queue list */
- spin_lock_bh(&ap_list_lock);
- list_add(&aq->list, &ac->queues);
- spin_unlock_bh(&ap_list_lock);
/* Start with a device reset */
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,9 +1094,6 @@
/* Register device */
rc = device_register(&aq->ap_dev.device);
if (rc) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&aq->list);
- spin_unlock_bh(&ap_list_lock);
put_device(&aq->ap_dev.device);
continue;
}
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index cfa161c..836efac9 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -160,7 +160,14 @@
static void ap_card_device_release(struct device *dev)
{
- kfree(to_ap_card(dev));
+ struct ap_card *ac = to_ap_card(dev);
+
+ if (!list_empty(&ac->list)) {
+ spin_lock_bh(&ap_list_lock);
+ list_del_init(&ac->list);
+ spin_unlock_bh(&ap_list_lock);
+ }
+ kfree(ac);
}
struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 480c58a..0f1a5d0 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -584,7 +584,14 @@
static void ap_queue_device_release(struct device *dev)
{
- kfree(to_ap_queue(dev));
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ if (!list_empty(&aq->list)) {
+ spin_lock_bh(&ap_list_lock);
+ list_del_init(&aq->list);
+ spin_unlock_bh(&ap_list_lock);
+ }
+ kfree(aq);
}
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index dba94b4..fa732bd 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1954,7 +1954,6 @@
privptr->conn = NULL; privptr->fsm = NULL;
/* privptr gets freed by free_netdev() */
}
- free_netdev(dev);
}
/**
@@ -1972,7 +1971,8 @@
dev->mtu = NETIUCV_MTU_DEFAULT;
dev->min_mtu = 576;
dev->max_mtu = NETIUCV_MTU_MAX;
- dev->destructor = netiucv_free_netdevice;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = netiucv_free_netdevice;
dev->hard_header_len = NETIUCV_HDRLEN;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 4fc8ed5..1f424e4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@
struct bnx2fc_cmd_mgr *cmd_mgr;
spinlock_t hba_lock;
struct mutex hba_mutex;
+ struct mutex hba_stats_mutex;
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 93b5a00..902722d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -663,15 +663,17 @@
if (!fw_stats)
return NULL;
+ mutex_lock(&hba->hba_stats_mutex);
+
bnx2fc_stats = fc_get_host_stats(shost);
init_completion(&hba->stat_req_done);
if (bnx2fc_send_stat_req(hba))
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
if (!rc) {
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
}
BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -693,6 +695,9 @@
memcpy(&hba->prev_stats, hba->stats_buffer,
sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+ mutex_unlock(&hba->hba_stats_mutex);
return bnx2fc_stats;
}
@@ -1340,6 +1345,7 @@
}
spin_lock_init(&hba->hba_lock);
mutex_init(&hba->hba_mutex);
+ mutex_init(&hba->hba_stats_mutex);
hba->cnic = cnic;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1076c15..0aae094 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1595,7 +1595,6 @@
cxgbi_sock_put(csk);
}
csk->dst = NULL;
- csk->cdev = NULL;
}
static int init_act_open(struct cxgbi_sock *csk)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index bd7d39e..e4c83b7 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -867,7 +867,8 @@
log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
csk, (csk)->state, (csk)->flags, (csk)->tid);
spin_lock_bh(&csk->lock);
- dst_confirm(csk->dst);
+ if (csk->dst)
+ dst_confirm(csk->dst);
data_lost = skb_queue_len(&csk->receive_queue);
__skb_queue_purge(&csk->receive_queue);
@@ -882,7 +883,8 @@
}
if (close_req) {
- if (data_lost)
+ if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
+ data_lost)
csk->cdev->csk_send_abort_req(csk);
else
csk->cdev->csk_send_close_req(csk);
@@ -1186,9 +1188,10 @@
cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
skb = next;
}
-done:
+
if (likely(skb_queue_len(&csk->write_queue)))
cdev->csk_push_tx_frames(csk, 1);
+done:
spin_unlock_bh(&csk->lock);
return copied;
@@ -1568,9 +1571,12 @@
}
}
-static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
+static int
+skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
+ struct sk_buff *skb)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ int err;
log_debug(1 << CXGBI_DBG_PDU_RX,
"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
@@ -1608,7 +1614,16 @@
}
}
- return read_pdu_skb(conn, skb, 0, 0);
+ err = read_pdu_skb(conn, skb, 0, 0);
+ if (likely(err >= 0)) {
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
+ u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
+ cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
+ }
+
+ return err;
}
static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
@@ -1713,7 +1728,7 @@
cxgbi_skcb_rx_pdulen(skb));
if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
- err = skb_read_pdu_bhs(conn, skb);
+ err = skb_read_pdu_bhs(csk, conn, skb);
if (err < 0) {
pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
"f 0x%lx, plen %u.\n",
@@ -1731,7 +1746,7 @@
cxgbi_skcb_flags(skb),
cxgbi_skcb_rx_pdulen(skb));
} else {
- err = skb_read_pdu_bhs(conn, skb);
+ err = skb_read_pdu_bhs(csk, conn, skb);
if (err < 0) {
pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
"f 0x%lx, plen %u.\n",
@@ -1873,6 +1888,11 @@
tcp_task->dd_data = tdata;
task->hdr = NULL;
+ if (tdata->skb) {
+ kfree_skb(tdata->skb);
+ tdata->skb = NULL;
+ }
+
if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
(opcode == ISCSI_OP_SCSI_DATA_OUT ||
(opcode == ISCSI_OP_SCSI_CMD &&
@@ -1890,6 +1910,7 @@
return -ENOMEM;
}
+ skb_get(tdata->skb);
skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
@@ -2035,9 +2056,9 @@
unsigned int datalen;
int err;
- if (!skb) {
+ if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p, skb NULL.\n", task);
+ "task 0x%p, skb 0x%p\n", task, skb);
return 0;
}
@@ -2050,7 +2071,6 @@
}
datalen = skb->data_len;
- tdata->skb = NULL;
/* write ppod first if using ofldq to write ppod */
if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
@@ -2078,6 +2098,7 @@
pdulen += ISCSI_DIGEST_SIZE;
task->conn->txdata_octets += pdulen;
+ cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
return 0;
}
@@ -2086,7 +2107,6 @@
"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
task, skb, skb->len, skb->data_len, err);
/* reset skb to send when we are called again */
- tdata->skb = skb;
return err;
}
@@ -2094,7 +2114,8 @@
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
- kfree_skb(skb);
+ __kfree_skb(tdata->skb);
+ tdata->skb = NULL;
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2113,8 +2134,10 @@
tcp_task->dd_data = NULL;
/* never reached the xmit task callout */
- if (tdata->skb)
- __kfree_skb(tdata->skb);
+ if (tdata->skb) {
+ kfree_skb(tdata->skb);
+ tdata->skb = NULL;
+ }
task_release_itt(task, task->hdr_itt);
memset(tdata, 0, sizeof(*tdata));
@@ -2714,6 +2737,9 @@
static int __init libcxgbi_init_module(void)
{
pr_info("%s", version);
+
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+ sizeof(struct cxgbi_skb_cb));
return 0;
}
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 18e0ea8..37f07aa 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -187,6 +187,7 @@
CTPF_HAS_ATID, /* reserved atid */
CTPF_HAS_TID, /* reserved hw tid */
CTPF_OFFLOAD_DOWN, /* offload function off */
+ CTPF_LOGOUT_RSP_RCVD, /* received logout response */
};
struct cxgbi_skb_rx_cb {
@@ -195,7 +196,8 @@
};
struct cxgbi_skb_tx_cb {
- void *l2t;
+ void *handle;
+ void *arp_err_handler;
struct sk_buff *wr_next;
};
@@ -203,6 +205,7 @@
SKCBF_TX_NEED_HDR, /* packet needs a header */
SKCBF_TX_MEM_WRITE, /* memory write */
SKCBF_TX_FLAG_COMPL, /* wr completion flag */
+ SKCBF_TX_DONE, /* skb tx done */
SKCBF_RX_COALESCED, /* received whole pdu */
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
@@ -215,13 +218,13 @@
};
struct cxgbi_skb_cb {
- unsigned char ulp_mode;
- unsigned long flags;
- unsigned int seq;
union {
struct cxgbi_skb_rx_cb rx;
struct cxgbi_skb_tx_cb tx;
};
+ unsigned char ulp_mode;
+ unsigned long flags;
+ unsigned int seq;
};
#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
@@ -374,11 +377,9 @@
cxgbi_skcb_tx_wr_next(skb) = NULL;
/*
* We want to take an extra reference since both us and the driver
- * need to free the packet before it's really freed. We know there's
- * just one user currently so we use atomic_set rather than skb_get
- * to avoid the atomic op.
+ * need to free the packet before it's really freed.
*/
- atomic_set(&skb->users, 2);
+ skb_get(skb);
if (!csk->wr_pending_head)
csk->wr_pending_head = skb;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3cbab87..2ceff58 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -265,18 +265,16 @@
struct list_head *list,
unsigned char *cdb)
{
- struct scsi_device *sdev = ctlr->ms_sdev;
- struct rdac_dh_data *h = sdev->handler_data;
struct rdac_mode_common *common;
unsigned data_size;
struct rdac_queue_data *qdata;
u8 *lun_table;
- if (h->ctlr->use_ms10) {
+ if (ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
data_size = sizeof(struct rdac_pg_expanded);
- rdac_pg = &h->ctlr->mode_select.expanded;
+ rdac_pg = &ctlr->mode_select.expanded;
memset(rdac_pg, 0, data_size);
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
@@ -288,7 +286,7 @@
struct rdac_pg_legacy *rdac_pg;
data_size = sizeof(struct rdac_pg_legacy);
- rdac_pg = &h->ctlr->mode_select.legacy;
+ rdac_pg = &ctlr->mode_select.legacy;
memset(rdac_pg, 0, data_size);
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
@@ -304,7 +302,7 @@
}
/* Prepare the command. */
- if (h->ctlr->use_ms10) {
+ if (ctlr->use_ms10) {
cdb[0] = MODE_SELECT_10;
cdb[7] = data_size >> 8;
cdb[8] = data_size & 0xff;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8912767..da669dc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,7 +127,7 @@
void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t, int);
-int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
void lpfc_more_plogi(struct lpfc_vport *);
void lpfc_more_adisc(struct lpfc_vport *);
void lpfc_end_rscn(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f2cd19c..24ce96d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -978,9 +978,10 @@
ndlp, did, ndlp->nlp_fc4_type,
FC_TYPE_FCP, FC_TYPE_NVME);
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
}
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bff3de0..f74cb01 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -206,7 +206,7 @@
* associated with a LPFC_NODELIST entry. This
* routine effectively results in a "software abort".
*/
-int
+void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(abort_list);
@@ -215,6 +215,10 @@
pring = lpfc_phba_elsring(phba);
+ /* In the error recovery path, we might have a NULL pring here */
+ if (!pring)
+ return;
+
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
"2819 Abort outstanding I/O on NPort x%x "
@@ -273,7 +277,6 @@
IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
- return 0;
}
static int
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 074a6b5..518b15e 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -799,8 +799,8 @@
}
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
- lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
- ctxp->state, 0);
+ lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
+ ctxp->state, aborting);
atomic_inc(&lpfc_nvmep->xmt_fcp_release);
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 5ca3e8c..32632c9 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -38,7 +38,7 @@
#define QEDI_MAX_ISCSI_TASK 4096
#define QEDI_MAX_TASK_NUM 0x0FFF
#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
-#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */
+#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */
#define MAX_OUSTANDING_TASKS_PER_CON 1024
#define QEDI_MAX_BD_LEN 0xffff
@@ -63,6 +63,7 @@
#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
#define QEDI_PAGE_SIZE 4096
+#define QEDI_HW_DMA_BOUNDARY 0xfff
#define QEDI_PATH_HANDLE 0xFE0000000UL
struct qedi_uio_ctrl {
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d6978cb..8bc7ee1 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1494,6 +1494,8 @@
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
ep = qedi_conn->ep;
+ if (!ep)
+ return -ENODEV;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 3548d46..87f0af3 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -59,6 +59,7 @@
.this_id = -1,
.sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
.max_sectors = 0xffff,
+ .dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = qedi_shost_attrs,
@@ -1223,8 +1224,12 @@
iscsi_cid = (u32)path_data->handle;
qedi_ep = qedi->ep_tbl[iscsi_cid];
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+ if (!qedi_ep) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 92775a8..09a2946 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -151,6 +151,11 @@
static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
{
+ if (udev->uctrl) {
+ free_page((unsigned long)udev->uctrl);
+ udev->uctrl = NULL;
+ }
+
if (udev->ll2_ring) {
free_page((unsigned long)udev->ll2_ring);
udev->ll2_ring = NULL;
@@ -169,7 +174,6 @@
__qedi_free_uio_rings(udev);
pci_dev_put(udev->pdev);
- kfree(udev->uctrl);
kfree(udev);
}
@@ -208,6 +212,11 @@
if (udev->ll2_ring || udev->ll2_buf)
return rc;
+ /* Memory for control area. */
+ udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!udev->uctrl)
+ return -ENOMEM;
+
/* Allocating memory for LL2 ring */
udev->ll2_ring_size = QEDI_PAGE_SIZE;
udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
@@ -237,7 +246,6 @@
static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
{
struct qedi_uio_dev *udev = NULL;
- struct qedi_uio_ctrl *uctrl = NULL;
int rc = 0;
list_for_each_entry(udev, &qedi_udev_list, list) {
@@ -258,21 +266,14 @@
goto err_udev;
}
- uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
- if (!uctrl) {
- rc = -ENOMEM;
- goto err_uctrl;
- }
-
udev->uio_dev = -1;
udev->qedi = qedi;
udev->pdev = qedi->pdev;
- udev->uctrl = uctrl;
rc = __qedi_alloc_uio_rings(udev);
if (rc)
- goto err_uio_rings;
+ goto err_uctrl;
list_add(&udev->list, &qedi_udev_list);
@@ -283,8 +284,6 @@
udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
return 0;
- err_uio_rings:
- kfree(uctrl);
err_uctrl:
kfree(udev);
err_udev:
@@ -828,6 +827,8 @@
qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+ qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+ qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
if ((1 << log_page_size) == PAGE_SIZE)
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 16d1cd5..ca3420d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -730,6 +730,8 @@
return -EIO;
}
+ memset(&elreq, 0, sizeof(elreq));
+
elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
DMA_TO_DEVICE);
@@ -795,10 +797,9 @@
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
- ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
- le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
- && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
- elreq.options == EXTERNAL_LOOPBACK) {
+ (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+ req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+ elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
ql_dbg(ql_dbg_user, vha, 0x701e,
"BSG request type: %s.\n", type);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 51b4179..88748a6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1131,7 +1131,7 @@
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ae11901..eddbc12 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3425,6 +3425,7 @@
uint8_t max_req_queues;
uint8_t max_rsp_queues;
uint8_t max_qpairs;
+ uint8_t num_qpairs;
struct qla_qpair *base_qpair;
struct qla_npiv_entry *npiv_info;
uint16_t nvram_npiv_size;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0347433..0391fc3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7543,12 +7543,13 @@
/* Assign available que pair id */
mutex_lock(&ha->mq_lock);
qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
- if (qpair_id >= ha->max_qpairs) {
+ if (ha->num_qpairs >= ha->max_qpairs) {
mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, vha, 0x0183,
"No resources to create additional q pair.\n");
goto fail_qid_map;
}
+ ha->num_qpairs++;
set_bit(qpair_id, ha->qpair_qid_map);
ha->queue_pair_map[qpair_id] = qpair;
qpair->id = qpair_id;
@@ -7635,6 +7636,7 @@
fail_msix:
ha->queue_pair_map[qpair_id] = NULL;
clear_bit(qpair_id, ha->qpair_qid_map);
+ ha->num_qpairs--;
mutex_unlock(&ha->mq_lock);
fail_qid_map:
kfree(qpair);
@@ -7660,6 +7662,7 @@
mutex_lock(&ha->mq_lock);
ha->queue_pair_map[qpair->id] = NULL;
clear_bit(qpair->id, ha->qpair_qid_map);
+ ha->num_qpairs--;
list_del(&qpair->qp_list_elem);
if (list_empty(&vha->qp_list))
vha->flags.qpairs_available = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 66df6ce..c61a6a8 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -129,28 +129,16 @@
}
static inline void
-qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
- struct qla_tgt_cmd *tc)
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
- struct dsd_dma *dsd_ptr, *tdsd_ptr;
- struct crc_context *ctx;
-
- if (sp)
- ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
- else if (tc)
- ctx = (struct crc_context *)tc->ctx;
- else {
- BUG();
- return;
- }
+ struct dsd_dma *dsd, *tdsd;
/* clean up allocated prev pool */
- list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
- &ctx->dsd_list, list) {
- dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
- dsd_ptr->dsd_list_dma);
- list_del(&dsd_ptr->list);
- kfree(dsd_ptr);
+ list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
+ dsd->dsd_list_dma);
+ list_del(&dsd->list);
+ kfree(dsd);
}
INIT_LIST_HEAD(&ctx->dsd_list);
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index aac0350..2572121 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3282,7 +3282,7 @@
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
(ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
ql2xmqsupport))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a113ab3..cba1fc5 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3676,15 +3676,6 @@
qlt_update_host_map(vha, id);
}
- fc_host_port_name(vha->host) =
- wwn_to_u64(vha->port_name);
-
- if (qla_ini_mode_enabled(vha))
- ql_dbg(ql_dbg_mbx, vha, 0x1018,
- "FA-WWN portname %016llx (%x)\n",
- fc_host_port_name(vha->host),
- rptid_entry->vp_status);
-
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
} else {
@@ -4821,9 +4812,9 @@
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
- mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
+ /* BIT_6 specifies 64bit address */
+ mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
if (IS_CNA_CAPABLE(ha)) {
- mcp->mb[1] |= BIT_15;
mcp->mb[2] = vha->fcoe_fcf_idx;
}
mcp->mb[16] = LSW(mreq->rcv_dma);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1c79579..79f0502 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -630,29 +630,34 @@
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
+ if (!ctx)
+ goto end;
+
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
- qla2x00_clean_dsd_pool(ha, sp, NULL);
+ qla2x00_clean_dsd_pool(ha, ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- dma_pool_free(ha->dl_dma_pool, ctx,
- ((struct crc_context *)ctx)->crc_ctx_dma);
+ struct crc_context *ctx0 = ctx;
+
+ dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+ struct ct6_dsd *ctx1 = ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
- ctx1->fcp_cmnd_dma);
+ ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
mempool_free(ctx1, ha->ctx_mempool);
}
+end:
CMD_SP(cmd) = NULL;
qla2x00_rel_sp(sp);
}
@@ -699,21 +704,24 @@
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
+ if (!ctx)
+ goto end;
+
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
- qla2x00_clean_dsd_pool(ha, sp, NULL);
+ qla2x00_clean_dsd_pool(ha, ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- dma_pool_free(ha->dl_dma_pool, ctx,
- ((struct crc_context *)ctx)->crc_ctx_dma);
+ struct crc_context *ctx0 = ctx;
+
+ dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
-
+ struct ct6_dsd *ctx1 = ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
@@ -721,7 +729,7 @@
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
mempool_free(ctx1, ha->ctx_mempool);
}
-
+end:
CMD_SP(cmd) = NULL;
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
@@ -1632,7 +1640,7 @@
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
- int que, cnt;
+ int que, cnt, status;
unsigned long flags;
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -1662,8 +1670,12 @@
*/
sp_get(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- qla2xxx_eh_abort(GET_CMD_SP(sp));
+ status = qla2xxx_eh_abort(GET_CMD_SP(sp));
spin_lock_irqsave(&ha->hardware_lock, flags);
+ /* Get rid of the extra reference on immediate exit
+ * from qla2xxx_eh_abort */
+ if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(&sp->ref_count);
}
req->outstanding_cmds[cnt] = NULL;
sp->done(sp, res);
@@ -2623,10 +2635,10 @@
if (mem_only) {
if (pci_enable_device_mem(pdev))
- goto probe_out;
+ return ret;
} else {
if (pci_enable_device(pdev))
- goto probe_out;
+ return ret;
}
/* This may fail but that's ok */
@@ -2636,7 +2648,7 @@
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
"Unable to allocate memory for ha.\n");
- goto probe_out;
+ goto disable_device;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
@@ -3254,7 +3266,7 @@
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
-probe_out:
+disable_device:
pci_disable_device(pdev);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0e03ca2..e766d84 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2245,11 +2245,13 @@
pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
- if (cmd->ctx_dsd_alloced)
- qla2x00_clean_dsd_pool(ha, NULL, cmd);
+ if (!cmd->ctx)
+ return;
- if (cmd->ctx)
- dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
+ if (cmd->ctx_dsd_alloced)
+ qla2x00_clean_dsd_pool(ha, cmd->ctx);
+
+ dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 8a58ef3..c197972 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -371,7 +371,7 @@
goto done;
}
- if (end <= start || start == 0 || end == 0) {
+ if (end < start || start == 0 || end == 0) {
ql_dbg(ql_dbg_misc, vha, 0xd023,
"%s: unusable range (start=%x end=%x)\n", __func__,
ent->t262.end_addr, ent->t262.start_addr);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 17249c3..dc095a2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1404,7 +1404,7 @@
arr[4] = SDEBUG_LONG_INQ_SZ - 5;
arr[5] = (int)have_dif_prot; /* PROTECT bit */
if (sdebug_vpd_use_hostno == 0)
- arr[5] = 0x10; /* claim: implicit TGPS */
+ arr[5] |= 0x10; /* claim: implicit TPGS */
arr[6] = 0x10; /* claim: MultiP */
/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
arr[7] = 0xa; /* claim: LINKED + CMDQUE */
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index ae62704..4be87f5 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_CCREE
tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
- depends on CRYPTO_HW && OF && HAS_DMA
+ depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
default n
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 038e2ff..6471d3d 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -216,7 +216,8 @@
uint32_t nents, lbytes;
nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
- sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
+ sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+ (direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index dc6ecd8..ff10d1f 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -231,16 +231,12 @@
if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
- mutex_lock(&chip->state_lock);
ret = i2c_smbus_write_byte_data(chip->client,
AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
- if (ret < 0) {
- mutex_unlock(&chip->state_lock);
+ if (ret < 0)
return ret;
- }
chip->filter_rate_setup = i;
- mutex_unlock(&chip->state_lock);
return ret;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 2e1bd47..e6727ce 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -293,18 +293,10 @@
size_t lmmk_size;
size_t lum_size;
int rc;
- mm_segment_t seg;
if (!lsm)
return -ENODATA;
- /*
- * "Switch to kernel segment" to allow copying from kernel space by
- * copy_{to,from}_user().
- */
- seg = get_fs();
- set_fs(KERNEL_DS);
-
if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
@@ -406,6 +398,5 @@
out_free:
kvfree(lmmk);
out:
- set_fs(seg);
return rc;
}
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 8ea0190..466517c 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -19,5 +19,3 @@
obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
-ccflags-y += -Werror
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
index 1d7f7ab..6b13a3a 100644
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -4,5 +4,3 @@
ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
-
-ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index fceb9e9..c9c0e12 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,3 +1 @@
obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
-
-ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 3fa7c1c..f126a89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -351,5 +351,5 @@
DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
-ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index cfe37eb..859d0d6 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -152,7 +152,7 @@
static void mon_setup(struct net_device *dev)
{
dev->netdev_ops = &mon_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
ether_setup(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 36c3189..bd4352f 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2667,7 +2667,8 @@
mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
strncpy(mon_ndev->name, name, IFNAMSIZ);
mon_ndev->name[IFNAMSIZ - 1] = 0;
- mon_ndev->destructor = rtw_ndev_destructor;
+ mon_ndev->needs_free_netdev = true;
+ mon_ndev->priv_destructor = rtw_ndev_destructor;
mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index f83cfc7..0215899 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1207,8 +1207,6 @@
if (ndev->ieee80211_ptr)
kfree((u8 *)ndev->ieee80211_ptr);
-
- free_netdev(ndev);
}
void rtw_dev_unload(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 02db59e..aa16d1a 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -160,7 +160,7 @@
oldfs = get_fs(); set_fs(get_ds());
if (1!=readFile(fp, &buf, 1))
- ret = PTR_ERR(fp);
+ ret = -EINVAL;
set_fs(oldfs);
filp_close(fp, NULL);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 4fb3165..6b13719 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -34,9 +34,7 @@
if (!disc)
return 0;
- mutex_lock(&tty->atomic_write_lock);
ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
- mutex_unlock(&tty->atomic_write_lock);
tty_ldisc_deref(disc);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 9e217b1..fe4fe24 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -843,7 +843,10 @@
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ci_role(ci)->name);
+ if (ci->role != CI_ROLE_END)
+ return sprintf(buf, "%s\n", ci_role(ci)->name);
+
+ return 0;
}
static ssize_t ci_role_store(struct device *dev,
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 6d23eed..1c31e8a 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -294,7 +294,8 @@
{
struct ci_hdrc *ci = s->private;
- seq_printf(s, "%s\n", ci_role(ci)->name);
+ if (ci->role != CI_ROLE_END)
+ seq_printf(s, "%s\n", ci_role(ci)->name);
return 0;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 56d2d32..d68b125 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1993,6 +1993,7 @@
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
struct ci_role_driver *rdrv;
+ int ret;
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
return -ENXIO;
@@ -2005,7 +2006,10 @@
rdrv->stop = udc_id_switch_for_host;
rdrv->irq = udc_irq;
rdrv->name = "gadget";
- ci->roles[CI_ROLE_GADGET] = rdrv;
- return udc_start(ci);
+ ret = udc_start(ci);
+ if (!ret)
+ ci->roles[CI_ROLE_GADGET] = rdrv;
+
+ return ret;
}
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index e77a4ed..9f4a018 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -108,6 +108,8 @@
const struct usbmisc_ops *ops;
};
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
+
static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -242,10 +244,15 @@
val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
| MX53_USB_UHx_CTRL_ULPI_INT_EN;
writel(val, reg);
- /* Disable internal 60Mhz clock */
- reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
- val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
- writel(val, reg);
+ if (is_imx53_usbmisc(data)) {
+ /* Disable internal 60MHz clock */
+ reg = usbmisc->base +
+ MX53_USB_CLKONOFF_CTRL_OFFSET;
+ val = readl(reg) |
+ MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+ writel(val, reg);
+ }
+
}
if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
@@ -267,10 +274,15 @@
val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
| MX53_USB_UHx_CTRL_ULPI_INT_EN;
writel(val, reg);
- /* Disable internal 60Mhz clock */
- reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
- val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
- writel(val, reg);
+
+ if (is_imx53_usbmisc(data)) {
+ /* Disable internal 60MHz clock */
+ reg = usbmisc->base +
+ MX53_USB_CLKONOFF_CTRL_OFFSET;
+ val = readl(reg) |
+ MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+ writel(val, reg);
+ }
}
if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
@@ -456,6 +468,10 @@
.init = usbmisc_imx27_init,
};
+static const struct usbmisc_ops imx51_usbmisc_ops = {
+ .init = usbmisc_imx53_init,
+};
+
static const struct usbmisc_ops imx53_usbmisc_ops = {
.init = usbmisc_imx53_init,
};
@@ -479,6 +495,13 @@
.set_wakeup = usbmisc_imx7d_set_wakeup,
};
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+
+ return usbmisc->ops == &imx53_usbmisc_ops;
+}
+
int imx_usbmisc_init(struct imx_usbmisc_data *data)
{
struct imx_usbmisc *usbmisc;
@@ -536,7 +559,7 @@
},
{
.compatible = "fsl,imx51-usbmisc",
- .data = &imx53_usbmisc_ops,
+ .data = &imx51_usbmisc_ops,
},
{
.compatible = "fsl,imx53-usbmisc",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 9cd8722..a3ffe97 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -144,6 +144,8 @@
{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
{ .compatible = "snps,dwc2" },
{ .compatible = "samsung,s3c6400-hsotg" },
+ { .compatible = "amlogic,meson8-usb",
+ .data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson8b-usb",
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson-gxbb-usb",
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 49d685a..45b5540 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -315,6 +315,9 @@
list_del(&f->list);
if (f->unbind)
f->unbind(c, f);
+
+ if (f->bind_deactivated)
+ usb_function_activate(f);
}
EXPORT_SYMBOL_GPL(usb_remove_function);
@@ -956,12 +959,8 @@
f = list_first_entry(&config->functions,
struct usb_function, list);
- list_del(&f->list);
- if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
- f->unbind(config, f);
- /* may free memory for "f" */
- }
+
+ usb_remove_function(config, f);
}
list_del(&config->list);
if (config->unbind) {
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 4c8aacc..74d57d6 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -396,7 +396,11 @@
/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
- smp_wmb(); /* ensure the write of bh->state is complete */
+ /*
+ * Ensure the reading of thread_wakeup_needed
+ * and the writing of bh->state are completed
+ */
+ smp_mb();
/* Tell the main thread that something has happened */
common->thread_wakeup_needed = 1;
if (common->thread_task)
@@ -627,7 +631,12 @@
}
__set_current_state(TASK_RUNNING);
common->thread_wakeup_needed = 0;
- smp_rmb(); /* ensure the latest bh->state is visible */
+
+ /*
+ * Ensure the writing of thread_wakeup_needed
+ * and the reading of bh->state are completed
+ */
+ smp_mb();
return rc;
}
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index b4058f0..6a1ce6a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -281,7 +281,7 @@
dev->tx_queue_len = 1;
dev->netdev_ops = &pn_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->header_ops = &phonet_header_ops;
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b9ca0a2..684900f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@
/* closing ep0 === shutdown all */
- if (dev->gadget_registered)
+ if (dev->gadget_registered) {
usb_gadget_unregister_driver (&gadgetfs_driver);
+ dev->gadget_registered = false;
+ }
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@
gadgetfs_suspend (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
INFO (dev, "suspended from state %d\n", dev->state);
- spin_lock (&dev->lock);
+ spin_lock_irqsave(&dev->lock, flags);
switch (dev->state) {
case STATE_DEV_SETUP: // VERY odd... host died??
case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@
default:
break;
}
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ccabb51..7635fd7 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@
/* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) {
stop_activity(dum);
- spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
- spin_lock(&dum->lock);
}
} else if (dum_hcd->active != dum_hcd->old_active) {
- if (dum_hcd->old_active && dum->driver->suspend) {
- spin_unlock(&dum->lock);
+ if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- } else if (!dum_hcd->old_active && dum->driver->resume) {
- spin_unlock(&dum->lock);
+ else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
- spin_lock(&dum->lock);
- }
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
+ spin_lock_irq(&dum->lock);
dum->driver = NULL;
+ spin_unlock_irq(&dum->lock);
return 0;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6cf0785..f2cbd7f 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2470,11 +2470,8 @@
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
+ if (driver)
driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
usb_reinit(dev);
}
@@ -3348,8 +3345,6 @@
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3390,14 +3385,12 @@
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
- spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
- spin_lock(&dev->lock);
return;
}
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 5a2d845..cd4c885 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -623,7 +623,6 @@
{
usb3_disconnect(usb3);
usb3_write(usb3, 0, USB3_P0_INT_ENA);
- usb3_write(usb3, 0, USB3_PN_INT_ENA);
usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
@@ -1475,7 +1474,13 @@
struct renesas_usb3_request *usb3_req,
int status)
{
- usb3_pn_stop(usb3);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (usb3_pn_change(usb3, usb3_ep->num))
+ usb3_pn_stop(usb3);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
usb3_disable_pipe_irq(usb3, usb3_ep->num);
usb3_request_done(usb3_ep, usb3_req, status);
@@ -1504,30 +1509,46 @@
{
struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+ bool done = false;
if (!usb3_req)
return;
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num))
+ goto out;
+
if (usb3_ep->dir_in) {
/* Do not stop the IN pipe here to detect LSTTR interrupt */
if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
} else {
if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
- usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+ done = true;
}
+
+out:
+ /* need to unlock because usb3_request_done_pipen() locks it */
+ spin_unlock(&usb3->lock);
+
+ if (done)
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
}
static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
{
u32 pn_int_sta;
- if (usb3_pn_change(usb3, num) < 0)
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num) < 0) {
+ spin_unlock(&usb3->lock);
return;
+ }
pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+ spin_unlock(&usb3->lock);
if (pn_int_sta & PN_INT_LSTTR)
usb3_irq_epc_pipen_lsttr(usb3, num);
if (pn_int_sta & PN_INT_BFRDY)
@@ -1660,6 +1681,7 @@
spin_lock_irqsave(&usb3->lock, flags);
if (!usb3_pn_change(usb3, usb3_ep->num)) {
+ usb3_write(usb3, 0, USB3_PN_INT_ENA);
usb3_write(usb3, 0, USB3_PN_RAMMAP);
usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
}
@@ -1799,6 +1821,9 @@
/* hook up the driver */
usb3->driver = driver;
+ pm_runtime_enable(usb3_to_dev(usb3));
+ pm_runtime_get_sync(usb3_to_dev(usb3));
+
renesas_usb3_init_controller(usb3);
return 0;
@@ -1807,14 +1832,14 @@
static int renesas_usb3_stop(struct usb_gadget *gadget)
{
struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
- unsigned long flags;
- spin_lock_irqsave(&usb3->lock, flags);
usb3->softconnect = false;
usb3->gadget.speed = USB_SPEED_UNKNOWN;
usb3->driver = NULL;
renesas_usb3_stop_controller(usb3);
- spin_unlock_irqrestore(&usb3->lock, flags);
+
+ pm_runtime_put(usb3_to_dev(usb3));
+ pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -1891,9 +1916,6 @@
device_remove_file(&pdev->dev, &dev_attr_role);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
usb_del_gadget_udc(&usb3->gadget);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2099,9 +2121,6 @@
usb3->workaround_for_vbus = priv->workaround_for_vbus;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
dev_info(&pdev->dev, "probed\n");
return 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1f1687e..fddf273 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2119,11 +2119,12 @@
{
u32 temp, port_offset, port_count;
int i;
- u8 major_revision;
+ u8 major_revision, minor_revision;
struct xhci_hub *rhub;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
+ minor_revision = XHCI_EXT_PORT_MINOR(temp);
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
@@ -2137,7 +2138,9 @@
return;
}
rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
- rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+ if (rhub->min_rev < minor_revision)
+ rhub->min_rev = minor_revision;
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fcf1f3f..1bcf971 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -201,6 +201,9 @@
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == 0x1142)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9c7ee26..bc6a9be 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -245,6 +245,11 @@
dsps_mod_timer_optional(glue);
break;
case OTG_STATE_A_WAIT_BCON:
+ /* keep VBUS on for host-only mode */
+ if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+ dsps_mod_timer_optional(glue);
+ break;
+ }
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
/* fall */
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 687ebb0..41d7979 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1048,7 +1048,7 @@
for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
- if (PIXEL_CLOCK)
+ if (PIXEL_CLOCK != 0)
edt[num++] = block - edid;
/* Yikes, EDID data is totally useless */
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ec2e7e3..449fcea 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1646,8 +1646,9 @@
dev_dbg(dev->gdev, "%s %s - serial #%s\n",
usbdev->manufacturer, usbdev->product, usbdev->serial);
dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
- usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
- usbdev->descriptor.bcdDevice, dev);
+ le16_to_cpu(usbdev->descriptor.idVendor),
+ le16_to_cpu(usbdev->descriptor.idProduct),
+ le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
dev_dbg(dev->gdev, "console enable=%d\n", console);
dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 6a3c353..05ef657 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1105,8 +1105,8 @@
char *bufptr;
struct urb *urb;
- pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
- info->node, dev->blank_mode, blank_mode);
+ pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n",
+ info->node, dev->blank_mode, blank_mode);
if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
(blank_mode != FB_BLANK_POWERDOWN)) {
@@ -1613,8 +1613,9 @@
pr_info("%s %s - serial #%s\n",
usbdev->manufacturer, usbdev->product, usbdev->serial);
pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
- usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
- usbdev->descriptor.bcdDevice, dev);
+ le16_to_cpu(usbdev->descriptor.idVendor),
+ le16_to_cpu(usbdev->descriptor.idProduct),
+ le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
pr_info("console enable=%d\n", console);
pr_info("fb_defio enable=%d\n", fb_defio);
pr_info("shadow enable=%d\n", shadow);
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f0..badee04 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@
}
static void viafb_remove_proc(struct viafb_shared *shared)
{
- struct proc_dir_entry *viafb_entry = shared->proc_entry,
- *iga1_entry = shared->iga1_proc_entry,
- *iga2_entry = shared->iga2_proc_entry;
+ struct proc_dir_entry *viafb_entry = shared->proc_entry;
if (!viafb_entry)
return;
- remove_proc_entry("output_devices", iga2_entry);
+ remove_proc_entry("output_devices", shared->iga2_proc_entry);
remove_proc_entry("iga2", viafb_entry);
- remove_proc_entry("output_devices", iga1_entry);
+ remove_proc_entry("output_devices", shared->iga1_proc_entry);
remove_proc_entry("iga1", viafb_entry);
remove_proc_entry("supported_output_devices", viafb_entry);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 408c174..22caf80 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -663,6 +663,12 @@
}
#endif
+static int virtballoon_validate(struct virtio_device *vdev)
+{
+ __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ return 0;
+}
+
static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
@@ -675,6 +681,7 @@
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
+ .validate = virtballoon_validate,
.probe = virtballoon_probe,
.remove = virtballoon_remove,
.config_changed = virtballoon_changed,
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 7a92a5e..feca75b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -362,8 +362,8 @@
st->global_error = 1;
}
}
- st->va += PAGE_SIZE * nr;
- st->index += nr;
+ st->va += XEN_PAGE_SIZE * nr;
+ st->index += nr / XEN_PFN_PER_PAGE;
return 0;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 643c70d..4f8f75d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2563,7 +2563,7 @@
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
- return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+ return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
/*
@@ -2573,7 +2573,7 @@
static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
- return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
+ return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 60a7506..c24d615 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -468,7 +468,7 @@
if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
btrfs_crit(fs_info, "invalid dir item name len: %u",
- (unsigned)btrfs_dir_data_len(leaf, dir_item));
+ (unsigned)btrfs_dir_name_len(leaf, dir_item));
return 1;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8685d67..5f678dc 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3467,10 +3467,12 @@
* we fua the first super. The others we allow
* to go down lazy.
*/
- if (i == 0)
- ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
- else
+ if (i == 0) {
+ ret = btrfsic_submit_bh(REQ_OP_WRITE,
+ REQ_SYNC | REQ_FUA, bh);
+ } else {
ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+ }
if (ret)
errors++;
}
@@ -3535,7 +3537,7 @@
bio->bi_end_io = btrfs_end_empty_barrier;
bio->bi_bdev = device->bdev;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
device->flush_bio = bio;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e390451..33d979e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3993,6 +3993,7 @@
info->space_info_kobj, "%s",
alloc_name(found->flags));
if (ret) {
+ percpu_counter_destroy(&found->total_bytes_pinned);
kfree(found);
return ret;
}
@@ -4844,7 +4845,7 @@
spin_unlock(&delayed_rsv->lock);
commit:
- trans = btrfs_join_transaction(fs_info->fs_root);
+ trans = btrfs_join_transaction(fs_info->extent_root);
if (IS_ERR(trans))
return -ENOSPC;
@@ -4862,7 +4863,7 @@
struct btrfs_space_info *space_info, u64 num_bytes,
u64 orig_bytes, int state)
{
- struct btrfs_root *root = fs_info->fs_root;
+ struct btrfs_root *root = fs_info->extent_root;
struct btrfs_trans_handle *trans;
int nr;
int ret = 0;
@@ -5062,7 +5063,7 @@
int flush_state = FLUSH_DELAYED_ITEMS_NR;
spin_lock(&space_info->lock);
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+ to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
space_info);
if (!to_reclaim) {
spin_unlock(&space_info->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d8da3ed..d3619e0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2458,7 +2458,7 @@
if (!uptodate) {
ClearPageUptodate(page);
SetPageError(page);
- ret = ret < 0 ? ret : -EIO;
+ ret = err < 0 ? err : -EIO;
mapping_set_error(page->mapping, ret);
}
}
@@ -4377,6 +4377,123 @@
return NULL;
}
+/*
+ * Cache for the previous fiemap extent.
+ *
+ * Will be used when merging fiemap extents.
+ */
+struct fiemap_cache {
+ u64 offset;
+ u64 phys;
+ u64 len;
+ u32 flags;
+ bool cached;
+};
+
+/*
+ * Helper to submit a fiemap extent.
+ *
+ * Will try to merge the current fiemap extent, specified by @offset, @phys,
+ * @len and @flags, with the cached one.
+ * Only when the merge fails will the cached one be submitted as a
+ * fiemap extent.
+ *
+ * Return value is the same as fiemap_fill_next_extent().
+ */
+static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache,
+ u64 offset, u64 phys, u64 len, u32 flags)
+{
+ int ret = 0;
+
+ if (!cache->cached)
+ goto assign;
+
+ /*
+ * Sanity check: extent_fiemap() should have ensured that the new
+ * fiemap extent won't overlap with the cached one.
+ * Not recoverable.
+ *
+ * NOTE: Physical addresses can overlap, due to compression.
+ */
+ if (cache->offset + cache->len > offset) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /*
+ * Only merge fiemap extents if
+ * 1) Their logical addresses are contiguous
+ *
+ * 2) Their physical addresses are contiguous
+ * So truly compressed extents (physical size smaller than logical
+ * size) won't get merged with each other
+ *
+ * 3) They share the same flags except FIEMAP_EXTENT_LAST
+ * So a regular extent won't get merged with a prealloc extent
+ */
+ if (cache->offset + cache->len == offset &&
+ cache->phys + cache->len == phys &&
+ (cache->flags & ~FIEMAP_EXTENT_LAST) ==
+ (flags & ~FIEMAP_EXTENT_LAST)) {
+ cache->len += len;
+ cache->flags |= flags;
+ goto try_submit_last;
+ }
+
+ /* Not mergeable, need to submit cached one */
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+ cache->len, cache->flags);
+ cache->cached = false;
+ if (ret)
+ return ret;
+assign:
+ cache->cached = true;
+ cache->offset = offset;
+ cache->phys = phys;
+ cache->len = len;
+ cache->flags = flags;
+try_submit_last:
+ if (cache->flags & FIEMAP_EXTENT_LAST) {
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset,
+ cache->phys, cache->len, cache->flags);
+ cache->cached = false;
+ }
+ return ret;
+}
+
+/*
+ * Sanity check for the fiemap cache
+ *
+ * All fiemap cache entries should be submitted by emit_fiemap_extent().
+ * Iteration should be terminated either by the last fiemap extent or
+ * by fieinfo->fi_extents_max.
+ * So no cached fiemap extent should remain.
+ */
+static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
+ struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache)
+{
+ int ret;
+
+ if (!cache->cached)
+ return 0;
+
+ /* Small and recoverable problem, only to inform the developer */
+#ifdef CONFIG_BTRFS_DEBUG
+ WARN_ON(1);
+#endif
+ btrfs_warn(fs_info,
+ "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
+ cache->offset, cache->phys, cache->len, cache->flags);
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+ cache->len, cache->flags);
+ cache->cached = false;
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
@@ -4394,6 +4511,7 @@
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct fiemap_cache cache = { 0 };
int end = 0;
u64 em_start = 0;
u64 em_len = 0;
@@ -4573,8 +4691,8 @@
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
- ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
- em_len, flags);
+ ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
+ em_len, flags);
if (ret) {
if (ret == 1)
ret = 0;
@@ -4582,6 +4700,8 @@
}
}
out_free:
+ if (!ret)
+ ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
free_extent_map(em);
out:
btrfs_free_path(path);
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc1..baacc18 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@
{
SHASH_DESC_ON_STACK(shash, tfm);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = tfm;
@@ -47,5 +48,7 @@
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17cbe93..ef3c98c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2952,7 +2952,7 @@
ret = test_range_bit(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
- EXTENT_DEFRAG, 1, cached_state);
+ EXTENT_DEFRAG, 0, cached_state);
if (ret) {
u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
if (0 && last_snapshot >= BTRFS_I(inode)->generation)
@@ -7483,8 +7483,8 @@
int found = false;
void **pagep = NULL;
struct page *page = NULL;
- int start_idx;
- int end_idx;
+ unsigned long start_idx;
+ unsigned long end_idx;
start_idx = start >> PAGE_SHIFT;
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 987044b..59cb307 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -131,6 +131,7 @@
}
if (new_mode != old_mode) {
+ newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
newattrs.ia_valid = ATTR_MODE;
ret = __ceph_setattr(inode, &newattrs);
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e8f11fa..7df550c 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -91,6 +91,10 @@
ceph_mdsc_put_request(req);
if (!inode)
return ERR_PTR(-ESTALE);
+ if (inode->i_nlink == 0) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
}
return d_obtain_alias(inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index dcce79b..4de6cdd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2022,7 +2022,6 @@
attr->ia_size > inode->i_size) {
i_size_write(inode, attr->ia_size);
inode->i_blocks = calc_inode_blocks(attr->ia_size);
- inode->i_ctime = attr->ia_ctime;
ci->i_reported_size = attr->ia_size;
dirtied |= CEPH_CAP_FILE_EXCL;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
@@ -2044,7 +2043,6 @@
inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
only ? "ctime only" : "ignored");
- inode->i_ctime = attr->ia_ctime;
if (only) {
/*
* if kernel wants to dirty ctime but nothing else,
@@ -2067,7 +2065,7 @@
if (dirtied) {
inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
&prealloc_cf);
- inode->i_ctime = current_time(inode);
+ inode->i_ctime = attr->ia_ctime;
}
release &= issued;
@@ -2085,6 +2083,7 @@
req->r_inode_drop = release;
req->r_args.setattr.mask = cpu_to_le32(mask);
req->r_num_caps = 1;
+ req->r_stamp = attr->ia_ctime;
err = ceph_mdsc_do_request(mdsc, NULL, req);
}
dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f38e56f..0c05df4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1687,7 +1687,6 @@
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
- struct timespec ts;
if (!req)
return ERR_PTR(-ENOMEM);
@@ -1706,8 +1705,7 @@
init_completion(&req->r_safe_completion);
INIT_LIST_HEAD(&req->r_unsafe_item);
- ktime_get_real_ts(&ts);
- req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
+ req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
req->r_op = op;
req->r_direct_mode = mode;
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 8b2a994..a66f662 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -138,6 +138,14 @@
}
EXPORT_SYMBOL(config_item_get);
+struct config_item *config_item_get_unless_zero(struct config_item *item)
+{
+ if (item && kref_get_unless_zero(&item->ci_kref))
+ return item;
+ return NULL;
+}
+EXPORT_SYMBOL(config_item_get_unless_zero);
+
static void config_item_cleanup(struct config_item *item)
{
struct config_item_type *t = item->ci_type;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index a6ab012..c8aabba 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@
ret = -ENOMEM;
sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
if (sl) {
- sl->sl_target = config_item_get(item);
spin_lock(&configfs_dirent_lock);
if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock);
- config_item_put(item);
kfree(sl);
return -ENOENT;
}
+ sl->sl_target = config_item_get(item);
list_add(&sl->sl_list, &target_sd->s_links);
spin_unlock(&configfs_dirent_lock);
ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/dcache.c b/fs/dcache.c
index cddf397..a9f995f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1494,7 +1494,7 @@
{
struct detach_data *data = _data;
- if (!data->mountpoint && !data->select.found)
+ if (!data->mountpoint && list_empty(&data->select.dispose))
__d_drop(data->select.start);
}
@@ -1536,17 +1536,15 @@
d_walk(dentry, &data, detach_and_collect, check_and_drop);
- if (data.select.found)
+ if (!list_empty(&data.select.dispose))
shrink_dentry_list(&data.select.dispose);
+ else if (!data.mountpoint)
+ return;
if (data.mountpoint) {
detach_mounts(data.mountpoint);
dput(data.mountpoint);
}
-
- if (!data.mountpoint && !data.select.found)
- break;
-
cond_resched();
}
}
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index fd38993..3ec0e46 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,6 +4,7 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
+#include <linux/quotaops.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
@@ -232,6 +233,9 @@
handle_t *handle;
int error, retries = 0;
+ error = dquot_initialize(inode);
+ if (error)
+ return error;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR,
ext4_jbd2_credits_xattr(inode));
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8e80461..3219154 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2523,7 +2523,6 @@
int buf_size,
struct inode *dir,
struct ext4_filename *fname,
- const struct qstr *d_name,
unsigned int offset,
struct ext4_dir_entry_2 **res_dir);
extern int ext4_generic_delete_entry(handle_t *handle,
@@ -3007,7 +3006,6 @@
int *has_inline_data);
extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_filename *fname,
- const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data);
extern int ext4_delete_inline_entry(handle_t *handle,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a97dff..3e36508 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3413,13 +3413,13 @@
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
- struct ext4_extent zero_ex;
+ struct ext4_extent zero_ex1, zero_ex2;
struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len;
int allocated = 0, max_zeroout = 0;
int err = 0;
- int split_flag = 0;
+ int split_flag = EXT4_EXT_DATA_VALID2;
ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
- zero_ex.ee_len = 0;
+ zero_ex1.ee_len = 0;
+ zero_ex2.ee_len = 0;
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
@@ -3576,62 +3577,52 @@
if (ext4_encrypted_inode(inode))
max_zeroout = 0;
- /* If extent is less than s_max_zeroout_kb, zeroout directly */
- if (max_zeroout && (ee_len <= max_zeroout)) {
- err = ext4_ext_zeroout(inode, ex);
- if (err)
- goto out;
- zero_ex.ee_block = ex->ee_block;
- zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
- ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
-
- err = ext4_ext_get_access(handle, inode, path + depth);
- if (err)
- goto out;
- ext4_ext_mark_initialized(ex);
- ext4_ext_try_to_merge(handle, inode, path, ex);
- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
- goto out;
- }
-
/*
- * four cases:
+ * five cases:
* 1. split the extent into three extents.
- * 2. split the extent into two extents, zeroout the first half.
- * 3. split the extent into two extents, zeroout the second half.
+ * 2. split the extent into two extents, zeroout the head of the first
+ * extent.
+ * 3. split the extent into two extents, zeroout the tail of the second
+ * extent.
* 4. split the extent into two extents without zeroout.
+ * 5. no splitting needed, just possibly zeroout the head and / or the
+ * tail of the extent.
*/
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
- if (max_zeroout && (allocated > map->m_len)) {
+ if (max_zeroout && (allocated > split_map.m_len)) {
if (allocated <= max_zeroout) {
- /* case 3 */
- zero_ex.ee_block =
- cpu_to_le32(map->m_lblk);
- zero_ex.ee_len = cpu_to_le16(allocated);
- ext4_ext_store_pblock(&zero_ex,
- ext4_ext_pblock(ex) + map->m_lblk - ee_block);
- err = ext4_ext_zeroout(inode, &zero_ex);
+ /* case 3 or 5 */
+ zero_ex1.ee_block =
+ cpu_to_le32(split_map.m_lblk +
+ split_map.m_len);
+ zero_ex1.ee_len =
+ cpu_to_le16(allocated - split_map.m_len);
+ ext4_ext_store_pblock(&zero_ex1,
+ ext4_ext_pblock(ex) + split_map.m_lblk +
+ split_map.m_len - ee_block);
+ err = ext4_ext_zeroout(inode, &zero_ex1);
if (err)
goto out;
- split_map.m_lblk = map->m_lblk;
split_map.m_len = allocated;
- } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
- /* case 2 */
- if (map->m_lblk != ee_block) {
- zero_ex.ee_block = ex->ee_block;
- zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+ }
+ if (split_map.m_lblk - ee_block + split_map.m_len <
+ max_zeroout) {
+ /* case 2 or 5 */
+ if (split_map.m_lblk != ee_block) {
+ zero_ex2.ee_block = ex->ee_block;
+ zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
ee_block);
- ext4_ext_store_pblock(&zero_ex,
+ ext4_ext_store_pblock(&zero_ex2,
ext4_ext_pblock(ex));
- err = ext4_ext_zeroout(inode, &zero_ex);
+ err = ext4_ext_zeroout(inode, &zero_ex2);
if (err)
goto out;
}
+ split_map.m_len += split_map.m_lblk - ee_block;
split_map.m_lblk = ee_block;
- split_map.m_len = map->m_lblk - ee_block + map->m_len;
allocated = map->m_len;
}
}
@@ -3642,8 +3633,11 @@
err = 0;
out:
/* If we have gotten a failure, don't zero out status tree */
- if (!err)
- err = ext4_zeroout_es(inode, &zero_ex);
+ if (!err) {
+ err = ext4_zeroout_es(inode, &zero_ex1);
+ if (!err)
+ err = ext4_zeroout_es(inode, &zero_ex2);
+ }
return err ? err : allocated;
}
@@ -4883,6 +4877,8 @@
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
@@ -5569,6 +5565,7 @@
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
@@ -5742,6 +5739,8 @@
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 831fd6b..02ce7e7 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -474,57 +474,37 @@
endoff = (loff_t)end_blk << blkbits;
index = startoff >> PAGE_SHIFT;
- end = endoff >> PAGE_SHIFT;
+ end = (endoff - 1) >> PAGE_SHIFT;
pagevec_init(&pvec, 0);
do {
int i, num;
unsigned long nr_pages;
- num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
- if (nr_pages == 0) {
- if (whence == SEEK_DATA)
- break;
-
- BUG_ON(whence != SEEK_HOLE);
- /*
- * If this is the first time to go into the loop and
- * offset is not beyond the end offset, it will be a
- * hole at this offset
- */
- if (lastoff == startoff || lastoff < endoff)
- found = 1;
+ if (nr_pages == 0)
break;
- }
-
- /*
- * If this is the first time to go into the loop and
- * offset is smaller than the first page offset, it will be a
- * hole at this offset.
- */
- if (lastoff == startoff && whence == SEEK_HOLE &&
- lastoff < page_offset(pvec.pages[0])) {
- found = 1;
- break;
- }
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
struct buffer_head *bh, *head;
/*
- * If the current offset is not beyond the end of given
- * range, it will be a hole.
+ * If current offset is smaller than the page offset,
+ * there is a hole at this offset.
*/
- if (lastoff < endoff && whence == SEEK_HOLE &&
- page->index > end) {
+ if (whence == SEEK_HOLE && lastoff < endoff &&
+ lastoff < page_offset(pvec.pages[i])) {
found = 1;
*offset = lastoff;
goto out;
}
+ if (page->index > end)
+ goto out;
+
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping)) {
@@ -564,20 +544,18 @@
unlock_page(page);
}
- /*
- * The no. of pages is less than our desired, that would be a
- * hole in there.
- */
- if (nr_pages < num && whence == SEEK_HOLE) {
- found = 1;
- *offset = lastoff;
+ /* The number of pages is less than desired; we are done. */
+ if (nr_pages < num)
break;
- }
index = pvec.pages[i - 1]->index + 1;
pagevec_release(&pvec);
} while (index <= end);
+ if (whence == SEEK_HOLE && lastoff < endoff) {
+ found = 1;
+ *offset = lastoff;
+ }
out:
pagevec_release(&pvec);
return found;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index d5dea4c..8d141c0 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1627,7 +1627,6 @@
struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_filename *fname,
- const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data)
{
@@ -1649,7 +1648,7 @@
EXT4_INLINE_DOTDOT_SIZE;
inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
- dir, fname, d_name, 0, res_dir);
+ dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
if (ret < 0)
@@ -1662,7 +1661,7 @@
inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
- dir, fname, d_name, 0, res_dir);
+ dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1bd0bfa..5cf82d0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2124,15 +2124,29 @@
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
int len;
- loff_t size = i_size_read(mpd->inode);
+ loff_t size;
int err;
BUG_ON(page->index != mpd->first_page);
+ clear_page_dirty_for_io(page);
+ /*
+ * We have to be very careful here! Nothing protects writeback path
+ * against i_size changes and the page can be writeably mapped into
+ * page tables. So an application can be growing i_size and writing
+ * data through mmap while writeback runs. clear_page_dirty_for_io()
+ * write-protects our page in page tables and the page cannot get
+ * written to again until we release page lock. So only after
+ * clear_page_dirty_for_io() we are safe to sample i_size for
+ * ext4_bio_write_page() to zero-out tail of the written page. We rely
+ * on the barrier provided by TestClearPageDirty in
+ * clear_page_dirty_for_io() to make sure i_size is really sampled only
+ * after page tables are updated.
+ */
+ size = i_size_read(mpd->inode);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
- clear_page_dirty_for_io(page);
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
mpd->wbc->nr_to_write--;
@@ -3629,9 +3643,6 @@
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
-#endif
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block_func, ext4_end_io_dio, NULL,
dio_flags);
@@ -3713,7 +3724,7 @@
*/
inode_lock_shared(inode);
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
- iocb->ki_pos + count);
+ iocb->ki_pos + count - 1);
if (ret)
goto out_unlock;
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -4207,6 +4218,8 @@
inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
out_dio:
@@ -5637,8 +5650,9 @@
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
- new_extra_isize);
+ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize, 0,
+ new_extra_isize - EXT4_I(inode)->i_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5083bce2..b7928cd 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3887,7 +3887,8 @@
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, "Error loading buddy information for %u", group);
+ ext4_warning(sb, "Error %d loading buddy information for %u",
+ err, group);
put_bh(bitmap_bh);
return 0;
}
@@ -4044,10 +4045,11 @@
BUG_ON(pa->pa_type != MB_INODE_PA);
group = ext4_get_group_number(sb, pa->pa_pstart);
- err = ext4_mb_load_buddy(sb, group, &e4b);
+ err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
if (err) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
+ ext4_error(sb, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
@@ -4303,11 +4305,14 @@
spin_unlock(&lg->lg_prealloc_lock);
list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+ int err;
group = ext4_get_group_number(sb, pa->pa_pstart);
- if (ext4_mb_load_buddy(sb, group, &e4b)) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
+ err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
+ if (err) {
+ ext4_error(sb, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
ext4_lock_group(sb, group);
@@ -5127,8 +5132,8 @@
ret = ext4_mb_load_buddy(sb, group, &e4b);
if (ret) {
- ext4_error(sb, "Error in loading buddy "
- "information for %u", group);
+ ext4_warning(sb, "Error %d loading buddy information for %u",
+ ret, group);
return ret;
}
bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b81f7d4..404256c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1155,12 +1155,11 @@
static inline int search_dirblock(struct buffer_head *bh,
struct inode *dir,
struct ext4_filename *fname,
- const struct qstr *d_name,
unsigned int offset,
struct ext4_dir_entry_2 **res_dir)
{
return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
- fname, d_name, offset, res_dir);
+ fname, offset, res_dir);
}
/*
@@ -1262,7 +1261,6 @@
*/
int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
struct inode *dir, struct ext4_filename *fname,
- const struct qstr *d_name,
unsigned int offset, struct ext4_dir_entry_2 **res_dir)
{
struct ext4_dir_entry_2 * de;
@@ -1355,7 +1353,7 @@
if (ext4_has_inline_data(dir)) {
int has_inline_data = 1;
- ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir,
+ ret = ext4_find_inline_entry(dir, &fname, res_dir,
&has_inline_data);
if (has_inline_data) {
if (inlined)
@@ -1447,7 +1445,7 @@
goto next;
}
set_buffer_verified(bh);
- i = search_dirblock(bh, dir, &fname, d_name,
+ i = search_dirblock(bh, dir, &fname,
block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
if (i == 1) {
EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1488,7 +1486,6 @@
{
struct super_block * sb = dir->i_sb;
struct dx_frame frames[2], *frame;
- const struct qstr *d_name = fname->usr_fname;
struct buffer_head *bh;
ext4_lblk_t block;
int retval;
@@ -1505,7 +1502,7 @@
if (IS_ERR(bh))
goto errout;
- retval = search_dirblock(bh, dir, fname, d_name,
+ retval = search_dirblock(bh, dir, fname,
block << EXT4_BLOCK_SIZE_BITS(sb),
res_dir);
if (retval == 1)
@@ -1530,7 +1527,7 @@
bh = NULL;
errout:
- dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
+ dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name));
success:
dx_release(frames);
return bh;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0b177da..d37c81f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -848,14 +848,9 @@
{
int type;
- if (ext4_has_feature_quota(sb)) {
- dquot_disable(sb, -1,
- DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
- } else {
- /* Use our quota_off function to clear inode flags etc. */
- for (type = 0; type < EXT4_MAXQUOTAS; type++)
- ext4_quota_off(sb, type);
- }
+ /* Use our quota_off function to clear inode flags etc. */
+ for (type = 0; type < EXT4_MAXQUOTAS; type++)
+ ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
@@ -1179,6 +1174,9 @@
return res;
}
+ res = dquot_initialize(inode);
+ if (res)
+ return res;
retry:
handle = ext4_journal_start(inode, EXT4_HT_MISC,
ext4_jbd2_credits_xattr(inode));
@@ -5485,7 +5483,7 @@
goto out;
err = dquot_quota_off(sb, type);
- if (err)
+ if (err || ext4_has_feature_quota(sb))
goto out_put;
inode_lock(inode);
@@ -5505,6 +5503,7 @@
out_unlock:
inode_unlock(inode);
out_put:
+ lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
iput(inode);
return err;
out:
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8fb7ce1..5d3c253 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -888,6 +888,8 @@
else {
u32 ref;
+ WARN_ON_ONCE(dquot_initialize_needed(inode));
+
/* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
@@ -954,6 +956,8 @@
/* We need to allocate a new block */
ext4_fsblk_t goal, block;
+ WARN_ON_ONCE(dquot_initialize_needed(inode));
+
goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
@@ -1166,6 +1170,7 @@
return -EINVAL;
if (strlen(name) > 255)
return -ERANGE;
+
ext4_write_lock_xattr(inode, &no_expand);
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -1267,6 +1272,9 @@
int error, retries = 0;
int credits = ext4_jbd2_credits_xattr(inode);
+ error = dquot_initialize(inode);
+ if (error)
+ return error;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle)) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2185c7a..fd2e651 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1078,6 +1078,7 @@
{
SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = sbi->s_chksum_driver;
@@ -1087,7 +1088,9 @@
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index dde8613..d44f545 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -200,7 +200,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 9ee4832..2d30a6d 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -680,6 +680,12 @@
rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
handle->h_buffer_credits = nblocks;
+ /*
+ * Restore the original nofs context because the journal restart
+ * is basically the same thing as journal stop and start.
+ * start_this_handle will start a new nofs context.
+ */
+ memalloc_nofs_restore(handle->saved_alloc_context);
ret = start_this_handle(journal, handle, gfp_mask);
return ret;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 8bd3e4d..5a44384 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3488,6 +3488,8 @@
return err;
}
+ put_mnt_ns(old_mnt_ns);
+
/* Update the pwd and root */
set_fs_pwd(fs, &root);
set_fs_root(fs, &root);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f5714ee..23542dc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -454,6 +454,7 @@
goto out_err_free;
/* fh */
+ rc = -EIO;
p = xdr_inline_decode(&stream, 4);
if (!p)
goto out_err_free;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e9b4c33..3e24392 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -398,7 +398,6 @@
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
struct nfs_subversion *);
-void nfs_initialise_sb(struct super_block *);
int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
@@ -458,7 +457,6 @@
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
-void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
void nfs_umount_begin(struct super_block *);
int nfs_statfs(struct dentry *, struct kstatfs *);
int nfs_show_options(struct seq_file *, struct dentry *);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 929d09a..319a47d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -177,7 +177,7 @@
if (status)
goto out;
- if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+ if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
&res->commit_res.verf->verifier)) {
status = -EAGAIN;
goto out;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 692a7a8..66776f0 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -582,7 +582,6 @@
*/
nfs4_schedule_path_down_recovery(pos);
default:
- spin_lock(&nn->nfs_client_lock);
goto out;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index adc6ec2..c383d09 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2094,12 +2094,26 @@
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
+/*
+ * Check for any intersection between the request and the pgio->pg_lseg,
+ * and if none, put this pgio->pg_lseg away.
+ */
+static void
+pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ }
+}
+
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
u64 rd_size = req->wb_bytes;
pnfs_generic_pg_check_layout(pgio);
+ pnfs_generic_pg_check_range(pgio, req);
if (pgio->pg_lseg == NULL) {
if (pgio->pg_dreq == NULL)
rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
@@ -2131,6 +2145,7 @@
struct nfs_page *req, u64 wb_size)
{
pnfs_generic_pg_check_layout(pgio);
+ pnfs_generic_pg_check_range(pgio, req);
if (pgio->pg_lseg == NULL) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
@@ -2191,16 +2206,10 @@
seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
pgio->pg_lseg->pls_range.length);
req_start = req_offset(req);
- WARN_ON_ONCE(req_start >= seg_end);
+
/* start of request is past the last byte of this segment */
- if (req_start >= seg_end) {
- /* reference the new lseg */
- if (pgio->pg_ops->pg_cleanup)
- pgio->pg_ops->pg_cleanup(pgio);
- if (pgio->pg_ops->pg_init)
- pgio->pg_ops->pg_init(pgio, req);
+ if (req_start >= seg_end)
return 0;
- }
/* adjust 'size' iff there are fewer bytes left in the
* segment than what nfs_generic_pg_test returned */
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2d05b75..99731e3 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -593,6 +593,16 @@
return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
}
+static inline bool
+pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
+{
+ u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length);
+ u64 req_last = req_offset(req) + req->wb_bytes;
+
+ return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last,
+ req_offset(req), req_last);
+}
+
extern unsigned int layoutstats_timer;
#ifdef NFS_DEBUG
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2f3822a..eceb4ea 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2301,7 +2301,7 @@
/*
* Initialise the common bits of the superblock
*/
-inline void nfs_initialise_sb(struct super_block *sb)
+static void nfs_initialise_sb(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
@@ -2348,7 +2348,8 @@
/*
* Finish setting up a cloned NFS2/3/4 superblock
*/
-void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+static void nfs_clone_super(struct super_block *sb,
+ struct nfs_mount_info *mount_info)
{
const struct super_block *old_sb = mount_info->cloned->sb;
struct nfs_server *server = NFS_SB(sb);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0c8b33..520802d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -300,11 +300,7 @@
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ebf80c7..48813ae 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1512,6 +1512,22 @@
}
EXPORT_SYMBOL(dquot_initialize);
+bool dquot_initialize_needed(struct inode *inode)
+{
+ struct dquot **dquots;
+ int i;
+
+ if (!dquot_active(inode))
+ return false;
+
+ dquots = i_dquot(inode);
+ for (i = 0; i < MAXQUOTAS; i++)
+ if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(dquot_initialize_needed);
+
/*
* Release all quotas referenced by inode.
*
diff --git a/fs/read_write.c b/fs/read_write.c
index 47c1d44..19d4d88 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1285,7 +1285,7 @@
if (!(file->f_mode & FMODE_CAN_WRITE))
goto out;
- ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+ ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
out:
if (ret > 0)
diff --git a/fs/stat.c b/fs/stat.c
index f494b18..c356108 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -672,6 +672,7 @@
inode->i_bytes -= 512;
}
}
+EXPORT_SYMBOL(__inode_add_bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index a0376a2..0315fea 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -82,7 +82,8 @@
ufs_error (sb, "ufs_free_fragments",
"bit already cleared for fragment %u", i);
}
-
+
+ inode_sub_bytes(inode, count << uspi->s_fshift);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
uspi->cs_total.cs_nffree += count;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -184,6 +185,7 @@
ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
}
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+ inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
@@ -398,10 +400,12 @@
/*
* There is not enough space for user on the device
*/
- if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
- mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT (FAILED)\n");
- return 0;
+ if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
+ if (!capable(CAP_SYS_RESOURCE)) {
+ mutex_unlock(&UFS_SB(sb)->s_lock);
+ UFSD("EXIT (FAILED)\n");
+ return 0;
+ }
}
if (goal >= uspi->s_size)
@@ -419,12 +423,12 @@
if (result) {
ufs_clear_frags(inode, result + oldcount,
newcount - oldcount, locked_page != NULL);
+ *err = 0;
write_seqlock(&UFS_I(inode)->meta_lock);
ufs_cpu_to_data_ptr(sb, p, result);
- write_sequnlock(&UFS_I(inode)->meta_lock);
- *err = 0;
UFS_I(inode)->i_lastfrag =
max(UFS_I(inode)->i_lastfrag, fragment + count);
+ write_sequnlock(&UFS_I(inode)->meta_lock);
}
mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT, result %llu\n", (unsigned long long)result);
@@ -437,8 +441,10 @@
result = ufs_add_fragments(inode, tmp, oldcount, newcount);
if (result) {
*err = 0;
+ read_seqlock_excl(&UFS_I(inode)->meta_lock);
UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
fragment + count);
+ read_sequnlock_excl(&UFS_I(inode)->meta_lock);
ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
locked_page != NULL);
mutex_unlock(&UFS_SB(sb)->s_lock);
@@ -472,16 +478,16 @@
if (result) {
ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
locked_page != NULL);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
ufs_change_blocknr(inode, fragment - oldcount, oldcount,
uspi->s_sbbase + tmp,
uspi->s_sbbase + result, locked_page);
+ *err = 0;
write_seqlock(&UFS_I(inode)->meta_lock);
ufs_cpu_to_data_ptr(sb, p, result);
- write_sequnlock(&UFS_I(inode)->meta_lock);
- *err = 0;
UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
fragment + count);
- mutex_unlock(&UFS_SB(sb)->s_lock);
+ write_sequnlock(&UFS_I(inode)->meta_lock);
if (newcount < request)
ufs_free_fragments (inode, result + newcount, request - newcount);
ufs_free_fragments (inode, tmp, oldcount);
@@ -494,6 +500,20 @@
return 0;
}
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+ unsigned size = frags * i_blocksize(inode);
+ spin_lock(&inode->i_lock);
+ __inode_add_bytes(inode, size);
+ if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+ __inode_sub_bytes(inode, size);
+ spin_unlock(&inode->i_lock);
+ return false;
+ }
+ spin_unlock(&inode->i_lock);
+ return true;
+}
+
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
unsigned oldcount, unsigned newcount)
{
@@ -530,6 +550,9 @@
for (i = oldcount; i < newcount; i++)
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
return 0;
+
+ if (!try_add_frags(inode, count))
+ return 0;
/*
* Block can be extended
*/
@@ -647,6 +670,7 @@
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
+ inode_sub_bytes(inode, i << uspi->s_fshift);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +681,8 @@
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
+ if (!try_add_frags(inode, count))
+ return 0;
for (i = 0; i < count; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
@@ -716,6 +742,8 @@
return INVBLOCK;
ucpi->c_rotor = result;
gotit:
+ if (!try_add_frags(inode, uspi->s_fpb))
+ return 0;
blkno = ufs_fragstoblks(result);
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7e41aee..9f459026 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@
p = ufs_get_direct_data_ptr(uspi, ufsi, block);
tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
- new_size, err, locked_page);
+ new_size - (lastfrag & uspi->s_fpbmask), err,
+ locked_page);
return tmp != 0;
}
@@ -284,7 +285,7 @@
goal += uspi->s_fpb;
}
tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
- goal, uspi->s_fpb, err, locked_page);
+ goal, nfrags, err, locked_page);
if (!tmp) {
*err = -ENOSPC;
@@ -400,11 +401,20 @@
u64 phys64 = 0;
unsigned frag = fragment & uspi->s_fpbmask;
- if (!create) {
- phys64 = ufs_frag_map(inode, offsets, depth);
- goto out;
- }
+ phys64 = ufs_frag_map(inode, offsets, depth);
+ if (!create)
+ goto done;
+ if (phys64) {
+ if (fragment >= UFS_NDIR_FRAGMENT)
+ goto done;
+ read_seqlock_excl(&UFS_I(inode)->meta_lock);
+ if (fragment < UFS_I(inode)->i_lastfrag) {
+ read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+ goto done;
+ }
+ read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+ }
/* This code entered only while writing ....? */
mutex_lock(&UFS_I(inode)->truncate_mutex);
@@ -448,6 +458,11 @@
}
mutex_unlock(&UFS_I(inode)->truncate_mutex);
return err;
+
+done:
+ if (phys64)
+ map_bh(bh_result, sb, phys64 + frag);
+ return 0;
}
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -841,8 +856,11 @@
truncate_inode_pages_final(&inode->i_data);
if (want_delete) {
inode->i_size = 0;
- if (inode->i_blocks)
+ if (inode->i_blocks &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
ufs_truncate_blocks(inode);
+ ufs_update_inode(inode, inode_needs_sync(inode));
}
invalidate_inode_buffers(inode);
@@ -868,7 +886,6 @@
ctx->to = from + count;
}
-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
static void ufs_trunc_direct(struct inode *inode)
@@ -1100,25 +1117,30 @@
return err;
}
-static void __ufs_truncate_blocks(struct inode *inode)
+static void ufs_truncate_blocks(struct inode *inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
unsigned offsets[4];
- int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+ int depth;
int depth2;
unsigned i;
struct ufs_buffer_head *ubh[3];
void *p;
u64 block;
- if (!depth)
- return;
+ if (inode->i_size) {
+ sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
+ depth = ufs_block_to_path(inode, last, offsets);
+ if (!depth)
+ return;
+ } else {
+ depth = 1;
+ }
- /* find the last non-zero in offsets[] */
for (depth2 = depth - 1; depth2; depth2--)
- if (offsets[depth2])
+ if (offsets[depth2] != uspi->s_apb - 1)
break;
mutex_lock(&ufsi->truncate_mutex);
@@ -1127,9 +1149,8 @@
offsets[0] = UFS_IND_BLOCK;
} else {
/* get the blocks that should be partially emptied */
- p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+ p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
for (i = 0; i < depth2; i++) {
- offsets[i]++; /* next branch is fully freed */
block = ufs_data_ptr_to_cpu(sb, p);
if (!block)
break;
@@ -1140,7 +1161,7 @@
write_sequnlock(&ufsi->meta_lock);
break;
}
- p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+ p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
}
while (i--)
free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
@@ -1155,7 +1176,9 @@
free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
}
}
+ read_seqlock_excl(&ufsi->meta_lock);
ufsi->i_lastfrag = DIRECT_FRAGMENT;
+ read_sequnlock_excl(&ufsi->meta_lock);
mark_inode_dirty(inode);
mutex_unlock(&ufsi->truncate_mutex);
}
@@ -1183,7 +1206,7 @@
truncate_setsize(inode, size);
- __ufs_truncate_blocks(inode);
+ ufs_truncate_blocks(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
mark_inode_dirty(inode);
out:
@@ -1191,16 +1214,6 @@
return err;
}
-static void ufs_truncate_blocks(struct inode *inode)
-{
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)))
- return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
- return;
- __ufs_truncate_blocks(inode);
-}
-
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 131b2b7..d5300ad 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -480,7 +480,7 @@
usb3 = ubh_get_usb_third(uspi);
if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
- (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
+ (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
mtype == UFS_MOUNT_UFSTYPE_UFS2) {
/* we have statistics in a different place than usual */
uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
@@ -596,9 +596,7 @@
usb2 = ubh_get_usb_second(uspi);
usb3 = ubh_get_usb_third(uspi);
- if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
- (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
- mtype == UFS_MOUNT_UFSTYPE_UFS2) {
+ if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
/* we have statistics in a different place than usual */
usb2->fs_un.fs_u2.cs_ndir =
cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
@@ -608,16 +606,26 @@
cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
usb3->fs_un1.fs_u2.cs_nffree =
cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
- } else {
- usb1->fs_cstotal.cs_ndir =
- cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
- usb1->fs_cstotal.cs_nbfree =
- cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
- usb1->fs_cstotal.cs_nifree =
- cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
- usb1->fs_cstotal.cs_nffree =
- cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+ goto out;
}
+
+ if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
+ (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
+ /* store stats in both old and new places */
+ usb2->fs_un.fs_u2.cs_ndir =
+ cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
+ usb2->fs_un.fs_u2.cs_nbfree =
+ cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
+ usb3->fs_un1.fs_u2.cs_nifree =
+ cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
+ usb3->fs_un1.fs_u2.cs_nffree =
+ cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
+ }
+ usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
+ usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
+ usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
+ usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+out:
ubh_mark_buffer_dirty(USPI_UBH(uspi));
ufs_print_super_stuff(sb, usb1, usb2, usb3);
UFSD("EXIT\n");
@@ -746,6 +754,23 @@
return;
}
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+ int bits = uspi->s_apbshift;
+ u64 res;
+
+ if (bits > 21)
+ res = ~0ULL;
+ else
+ res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+ (1LL << (3*bits));
+
+ if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+ return MAX_LFS_FILESIZE;
+ return res << uspi->s_bshift;
+}
+
static int ufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ufs_sb_info * sbi;
@@ -812,9 +837,8 @@
uspi->s_dirblksize = UFS_SECTOR_SIZE;
super_block_offset=UFS_SBLOCK;
- /* Keep 2Gig file limit. Some UFS variants need to override
- this but as I don't know which I'll let those in the know loosen
- the rules */
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+
switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
case UFS_MOUNT_UFSTYPE_44BSD:
UFSD("ufstype=44bsd\n");
@@ -980,6 +1004,13 @@
flags |= UFS_ST_SUN;
}
+ if ((flags & UFS_ST_MASK) == UFS_ST_44BSD &&
+ uspi->s_postblformat == UFS_42POSTBLFMT) {
+ if (!silent)
+ pr_err("this is not a 44bsd filesystem");
+ goto failed;
+ }
+
/*
* Check ufs magic number
*/
@@ -1127,8 +1158,8 @@
uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
- uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
- uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
+ uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
+ uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
} else {
uspi->s_size = fs32_to_cpu(sb, usb1->fs_size);
uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize);
@@ -1177,6 +1208,9 @@
uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
+ uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
+ uspi->s_minfree, 100);
+
/*
* Compute other frequently used values
*/
@@ -1212,6 +1246,7 @@
"fast symlink size (%u)\n", uspi->s_maxsymlinklen);
uspi->s_maxsymlinklen = maxsymlen;
}
+ sb->s_maxbytes = ufs_max_bytes(sb);
sb->s_max_links = UFS_LINK_MAX;
inode = ufs_iget(sb, UFS_ROOTINO);
@@ -1365,19 +1400,17 @@
mutex_lock(&UFS_SB(sb)->s_lock);
usb3 = ubh_get_usb_third(uspi);
- if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+ if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
buf->f_type = UFS2_MAGIC;
- buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
- } else {
+ else
buf->f_type = UFS_MAGIC;
- buf->f_blocks = uspi->s_dsize;
- }
- buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
- uspi->cs_total.cs_nffree;
+
+ buf->f_blocks = uspi->s_dsize;
+ buf->f_bfree = ufs_freefrags(uspi);
buf->f_ffree = uspi->cs_total.cs_nifree;
buf->f_bsize = sb->s_blocksize;
- buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
- ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+ buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks)
+ ? (buf->f_bfree - uspi->s_root_blocks) : 0;
buf->f_files = uspi->s_ncg * uspi->s_ipg;
buf->f_namelen = UFS_MAXNAMLEN;
buf->f_fsid.val[0] = (u32)id;
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 0cbd5d3..823d55a 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -733,10 +733,8 @@
__u32 s_dblkno; /* offset of first data after cg */
__u32 s_cgoffset; /* cylinder group offset in cylinder */
__u32 s_cgmask; /* used to calc mod fs_ntrak */
- __u32 s_size; /* number of blocks (fragments) in fs */
- __u32 s_dsize; /* number of data blocks in fs */
- __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */
- __u64 s_u2_dsize; /*ufs2: number of data blocks in fs */
+ __u64 s_size; /* number of blocks (fragments) in fs */
+ __u64 s_dsize; /* number of data blocks in fs */
__u32 s_ncg; /* number of cylinder groups */
__u32 s_bsize; /* size of basic blocks */
__u32 s_fsize; /* size of fragments */
@@ -793,6 +791,7 @@
__u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */
__s32 fs_magic; /* filesystem magic */
unsigned int s_dirblksize;
+ __u64 s_root_blocks;
};
/*
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index f41ad0a..02497a4 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -243,9 +243,8 @@
struct page *ufs_get_locked_page(struct address_space *mapping,
pgoff_t index)
{
- struct page *page;
-
- page = find_lock_page(mapping, index);
+ struct inode *inode = mapping->host;
+ struct page *page = find_lock_page(mapping, index);
if (!page) {
page = read_mapping_page(mapping, index, NULL);
@@ -253,7 +252,7 @@
printk(KERN_ERR "ufs_change_blocknr: "
"read_mapping_page error: ino %lu, index: %lu\n",
mapping->host->i_ino, index);
- goto out;
+ return page;
}
lock_page(page);
@@ -262,8 +261,7 @@
/* Truncate got there first */
unlock_page(page);
put_page(page);
- page = NULL;
- goto out;
+ return NULL;
}
if (!PageUptodate(page) || PageError(page)) {
@@ -272,11 +270,12 @@
printk(KERN_ERR "ufs_change_blocknr: "
"can not read page: ino %lu, index: %lu\n",
- mapping->host->i_ino, index);
+ inode->i_ino, index);
- page = ERR_PTR(-EIO);
+ return ERR_PTR(-EIO);
}
}
-out:
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, 1 << inode->i_blkbits, 0);
return page;
}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b7fbf53..9fc7119 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -350,16 +350,11 @@
#define ubh_blkmap(ubh,begin,bit) \
((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
-/*
- * Determine the number of available frags given a
- * percentage to hold in reserve.
- */
static inline u64
-ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
+ufs_freefrags(struct ufs_sb_private_info *uspi)
{
return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
- uspi->cs_total.cs_nffree -
- (uspi->s_dsize * (percentreserved) / 100);
+ uspi->cs_total.cs_nffree;
}
/*
@@ -473,15 +468,19 @@
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
+ u8 mask;
switch (uspi->s_fpb) {
case 8:
return (*ubh_get_addr (ubh, begin + block) == 0xff);
case 4:
- return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+ mask = 0x0f << ((block & 0x01) << 2);
+ return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
case 2:
- return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+ mask = 0x03 << ((block & 0x03) << 1);
+ return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
case 1:
- return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+ mask = 0x01 << (block & 0x07);
+ return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
}
return 0;
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f7555fc..1d622f2 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -340,9 +340,28 @@
bool must_wait, return_to_userland;
long blocking_state;
- BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
ret = VM_FAULT_SIGBUS;
+
+ /*
+ * We don't do userfault handling for the final child pid update.
+ *
+ * We also don't do userfault handling during
+ * coredumping. hugetlbfs has the special
+ * follow_hugetlb_page() to skip missing pages in the
+ * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
+ * the no_page_table() helper in follow_page_mask(), but the
+ * shmem_vm_ops->fault method is invoked even during
+ * coredumping without mmap_sem and it ends up here.
+ */
+ if (current->flags & (PF_EXITING|PF_DUMPCORE))
+ goto out;
+
+ /*
+ * Coredumping runs without mmap_sem so we can only check that
+ * the mmap_sem is held, if PF_DUMPCORE was not set.
+ */
+ WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
goto out;
@@ -361,12 +380,6 @@
goto out;
/*
- * We don't do userfault handling for the final child pid update.
- */
- if (current->flags & PF_EXITING)
- goto out;
-
- /*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 07b77b7..16d6a57 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -117,7 +117,7 @@
__xfs_buf_ioacct_dec(
struct xfs_buf *bp)
{
- ASSERT(spin_is_locked(&bp->b_lock));
+ lockdep_assert_held(&bp->b_lock);
if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index f61c84f8..990210f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,6 @@
XFS_STATS_INC(mp, vn_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
- ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(!xfs_isiflocked(ip));
ASSERT(ip->i_ino == 0);
@@ -190,7 +189,7 @@
{
struct xfs_mount *mp = pag->pag_mount;
- ASSERT(spin_is_locked(&pag->pag_ici_lock));
+ lockdep_assert_held(&pag->pag_ici_lock);
if (pag->pag_ici_reclaimable++)
return;
@@ -212,7 +211,7 @@
{
struct xfs_mount *mp = pag->pag_mount;
- ASSERT(spin_is_locked(&pag->pag_ici_lock));
+ lockdep_assert_held(&pag->pag_ici_lock);
if (--pag->pag_ici_reclaimable)
return;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d92543f..bdc55c0 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -374,6 +374,20 @@
u16 validation_count;
};
+/*
+ * Maximum value of the validation_count field in struct acpi_table_desc.
+ * When reached, validation_count cannot be changed any more and the table will
+ * be permanently regarded as validated.
+ *
+ * This is to prevent situations in which unbalanced table get/put operations
+ * may cause premature table unmapping in the OS to happen.
+ *
+ * The maximum validation count can be defined to any value, but should be
+ * greater than the maximum number of OS early stage mapping slots to avoid
+ * leaking early stage table mappings to the late stage.
+ */
+#define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX
+
/* Masks for Flags field above */
#define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ab92c4e..b74a3ed 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -586,6 +586,8 @@
size_t cmd_size;
void *rq_alloc_data;
+
+ struct work_struct release_work;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 2174594..ec47101 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -48,6 +48,7 @@
CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
CSS_VISIBLE = (1 << 3), /* css is visible to userland */
+ CSS_DYING = (1 << 4), /* css is dying */
};
/* bits in struct cgroup flags field */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed2573e..710a005 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -344,6 +344,26 @@
}
/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline. In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal. cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed. If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+ return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
+/**
* css_put - put a css reference
* @css: target css
*
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de17999..d614c5e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,11 @@
* with any version that can compile the kernel
*/
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/*
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well.
+ */
+#undef inline
+#define inline inline __attribute__((unused)) notrace
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2319b8c..c967090 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -74,7 +74,8 @@
const char *name,
struct config_item_type *type);
-extern struct config_item * config_item_get(struct config_item *);
+extern struct config_item *config_item_get(struct config_item *);
+extern struct config_item *config_item_get_unless_zero(struct config_item *);
extern void config_item_put(struct config_item *);
struct config_item_type {
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 4eac267..92f2083 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -78,6 +78,7 @@
struct iommu_domain;
struct msi_msg;
+struct device;
static inline int iommu_dma_init(void)
{
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 5e9c74c..9bbf21a 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -136,7 +136,7 @@
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
- void *private_data) { return -1; }
+ void *private_data) { return -ENXIO; }
static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline void dmi_memdev_name(u16 handle, const char **bank,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 9ec5e22..0e306c5 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -153,7 +153,7 @@
#endif
/* managed by elevator core */
- char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
+ char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
struct list_head list;
};
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index fffb912..1fa293a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -417,6 +417,10 @@
#define ICH_HCR_EN (1 << 0)
#define ICH_HCR_UIE (1 << 1)
+#define ICH_VMCR_ACK_CTL_SHIFT 2
+#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT 3
+#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
#define ICH_VMCR_CBPR_SHIFT 4
#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
#define ICH_VMCR_EOIM_SHIFT 9
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index dc30f3d..d3453ee 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -25,7 +25,18 @@
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0
-#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT 0
+#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1
+#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT 2
+#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT 3
+#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT 4
+#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9
+#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
#define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023
@@ -84,8 +95,19 @@
#define GICH_LR_EOI (1 << 19)
#define GICH_LR_HW (1 << 31)
-#define GICH_VMCR_CTRL_SHIFT 0
-#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
+#define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT 1
+#define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT 2
+#define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT 3
+#define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT 4
+#define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT 9
+#define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT)
+
#define GICH_VMCR_PRIMASK_SHIFT 27
#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT)
#define GICH_VMCR_BINPOINT_SHIFT 21
diff --git a/include/linux/key.h b/include/linux/key.h
index 0c9b93b..78e25aa 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -173,7 +173,6 @@
#ifdef KEY_DEBUGGING
unsigned magic;
#define KEY_DEBUG_MAGIC 0x18273645u
-#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
#endif
unsigned long flags; /* status flags (change with bitops) */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f6..8e2828d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@
u16 rate_val;
};
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 32de072..edafedb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -766,6 +766,12 @@
MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
+enum {
+ MLX5_CAP_UMR_FENCE_STRONG = 0x0,
+ MLX5_CAP_UMR_FENCE_SMALL = 0x1,
+ MLX5_CAP_UMR_FENCE_NONE = 0x2,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x80];
@@ -875,7 +881,9 @@
u8 reserved_at_202[0x1];
u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0xa];
+ u8 reserved_at_205[0x5];
+ u8 umr_fence[0x2];
+ u8 reserved_at_20c[0x3];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b892e95..6f543a4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1393,12 +1393,6 @@
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
@@ -1414,28 +1408,6 @@
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2222,6 +2194,7 @@
pgoff_t offset,
unsigned long size);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -2250,6 +2223,30 @@
return vma;
}
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+}
+
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
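
Illustration only (not part of the patch): the new vm_start_gap()/vm_end_gap() helpers above widen a stack VMA by stack_guard_gap and clamp when the arithmetic wraps. A minimal userspace sketch of the downward-growing case, assuming a 4 KiB page and the 256-page default gap:

  #include <stdio.h>

  #define PAGE_SIZE       4096UL
  #define STACK_GUARD_GAP (256UL * PAGE_SIZE)    /* default: 256 pages */

  /* Downward-growing stack: the effective start moves down by the gap,
   * clamped to 0 if the subtraction wraps past address zero. */
  static unsigned long start_with_gap(unsigned long vm_start)
  {
          unsigned long start = vm_start - STACK_GUARD_GAP;

          return (start > vm_start) ? 0 : start;
  }

  int main(void)
  {
          printf("%#lx\n", start_with_gap(0x7ffff0000000UL)); /* gap applied */
          printf("%#lx\n", start_with_gap(0x10000UL));        /* wraps, clamped to 0 */
          return 0;
  }

vm_end_gap() mirrors this in the upward direction, clamping to -PAGE_SIZE on overflow instead.
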
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3f39d27..4ed952c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,8 +914,7 @@
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
* Called when a user wants to change the Maximum Transfer Unit
- * of a device. If not defined, any request to change MTU will
- * will return an error.
+ * of a device.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
* Callback used when the transmitter has not made any progress
@@ -1596,8 +1595,8 @@
* @rtnl_link_state: This enum represents the phases of creating
* a new link
*
- * @destructor: Called from unregister,
- * can be used to call free_netdev
+ * @needs_free_netdev: Should unregister perform free_netdev?
+ * @priv_destructor: Called from unregister
* @npinfo: XXX: need comments on this one
* @nd_net: Network namespace this network device is inside
*
@@ -1858,7 +1857,8 @@
RTNL_LINK_INITIALIZING,
} rtnl_link_state:16;
- void (*destructor)(struct net_device *dev);
+ bool needs_free_netdev;
+ void (*priv_destructor)(struct net_device *dev);
#ifdef CONFIG_NETPOLL
struct netpoll_info __rcu *npinfo;
@@ -4261,6 +4261,11 @@
return dev->name;
}
+static inline bool netdev_unregistering(const struct net_device *dev)
+{
+ return dev->reg_state == NETREG_UNREGISTERING;
+}
+
static inline const char *netdev_reg_state(const struct net_device *dev)
{
switch (dev->reg_state) {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9c6f768..dda22f4 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -44,6 +44,7 @@
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
int dquot_initialize(struct inode *inode);
+bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@
return 0;
}
+static inline bool dquot_initialize_needed(struct inode *inode)
+{
+ return false;
+}
+
static inline void dquot_drop(struct inode *inode)
{
}
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 167ad88..4c1d5f7 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -172,9 +172,7 @@
{
int retval;
- preempt_disable();
retval = __srcu_read_lock(sp);
- preempt_enable();
rcu_lock_acquire(&(sp)->dep_map);
return retval;
}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32..d971837 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,8 +189,6 @@
struct platform_freeze_ops {
int (*begin)(void);
int (*prepare)(void);
- void (*wake)(void);
- void (*sync)(void);
void (*restore)(void);
void (*end)(void);
};
@@ -430,8 +428,7 @@
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
-extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(void);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(bool reset) {}
+static inline void pm_wakeup_clear(void) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline void lock_system_sleep(void) {}
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 110f453..f7043cc 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
*/
struct tk_read_base {
struct clocksource *clock;
- u64 (*read)(struct clocksource *cs);
u64 mask;
u64 cycle_last;
u32 mult;
@@ -58,7 +57,7 @@
* interval.
* @xtime_remainder: Shifted nano seconds left over when rounding
* @cycle_interval
- * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
* @ntp_error: Difference between accumulated time and NTP time in ntp
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@
u64 cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
- u32 raw_interval;
+ u64 raw_interval;
/* The ntp_tick_length() value currently being used.
* This cached copy ensures we consistently apply the tick
* length for an entire tick, as ntp_tick_length may change
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index eb50ce5..298f996 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -29,7 +29,7 @@
struct cec_adapter;
struct cec_notifier;
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
/**
* cec_notifier_get - find or create a new cec_notifier for the given device.
@@ -106,6 +106,16 @@
{
}
+static inline void cec_notifier_register(struct cec_notifier *n,
+ struct cec_adapter *adap,
+ void (*callback)(struct cec_adapter *adap, u16 pa))
+{
+}
+
+static inline void cec_notifier_unregister(struct cec_notifier *n)
+{
+}
+
#endif
#endif
diff --git a/include/media/cec.h b/include/media/cec.h
index b8eb895..201f060 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -173,7 +173,7 @@
bool passthrough;
struct cec_log_addrs log_addrs;
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
struct cec_notifier *notifier;
#endif
@@ -206,7 +206,7 @@
#define cec_phys_addr_exp(pa) \
((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
-#if IS_ENABLED(CONFIG_CEC_CORE)
+#if IS_REACHABLE(CONFIG_CEC_CORE)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps, u8 available_las);
int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
@@ -300,7 +300,7 @@
*/
int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
void cec_register_cec_notifier(struct cec_adapter *adap,
struct cec_notifier *notifier);
#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index dbf0abb..3e505bb 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1007,6 +1007,7 @@
*/
extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
struct group_source_req;
struct group_filter;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38a7427..be6223c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -924,7 +924,7 @@
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
/* call when ack arrives (optional) */
void (*in_ack_event)(struct sock *sk, u32 flags);
- /* new value of cwnd after loss (optional) */
+ /* new value of cwnd after loss (required) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index f5f70e3..355b81f 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -158,7 +158,6 @@
};
struct sa_path_rec_ib {
- __be64 service_id;
__be16 dlid;
__be16 slid;
u8 raw_traffic;
@@ -174,7 +173,6 @@
};
struct sa_path_rec_opa {
- __be64 service_id;
__be32 dlid;
__be32 slid;
u8 raw_traffic;
@@ -189,6 +187,7 @@
struct sa_path_rec {
union ib_gid dgid;
union ib_gid sgid;
+ __be64 service_id;
/* reserved */
__be32 flow_label;
u8 hop_limit;
@@ -262,7 +261,7 @@
ib->ib.dlid = htons(ntohl(opa->opa.dlid));
ib->ib.slid = htons(ntohl(opa->opa.slid));
}
- ib->ib.service_id = opa->opa.service_id;
+ ib->service_id = opa->service_id;
ib->ib.raw_traffic = opa->opa.raw_traffic;
}
@@ -281,7 +280,7 @@
}
opa->opa.slid = slid;
opa->opa.dlid = dlid;
- opa->opa.service_id = ib->ib.service_id;
+ opa->service_id = ib->service_id;
opa->opa.raw_traffic = ib->ib.raw_traffic;
}
@@ -591,15 +590,6 @@
(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}
-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
- __be64 service_id)
-{
- if (rec->rec_type == SA_PATH_REC_TYPE_IB)
- rec->ib.service_id = service_id;
- else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
- rec->opa.service_id = service_id;
-}
-
static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@
rec->opa.raw_traffic = raw_traffic;
}
-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
- if (rec->rec_type == SA_PATH_REC_TYPE_IB)
- return rec->ib.service_id;
- else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
- return rec->opa.service_id;
- return 0;
-}
-
static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 5852661..348c102 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -10,9 +10,6 @@
struct module *module;
};
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
/**
 * Add a client to the list of IB netlink exporters.
* @index: Index of the added client
@@ -77,11 +74,4 @@
int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
unsigned int group, gfp_t flags);
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
#endif /* _RDMA_NETLINK_H */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index d179d77..7d4a594 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1486,8 +1486,10 @@
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
-/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */
+/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+ * Update drivers/net/phy/phy.c:phy_speed_to_str() and
+ * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values.
+ */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 201c6644..ef16df0 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -70,8 +70,8 @@
};
struct keyctl_kdf_params {
- char *hashname;
- char *otherinfo;
+ char __user *hashname;
+ char __user *otherinfo;
__u32 otherinfolen;
__u32 __spare[8];
};
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 61b7d36..156ee4c 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -343,6 +343,7 @@
#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
enum ovs_tunnel_key_attr {
+ /* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */
OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */
OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */
OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c3c9a0e..8d4e85e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4265,6 +4265,11 @@
{
lockdep_assert_held(&cgroup_mutex);
+ if (css->flags & CSS_DYING)
+ return;
+
+ css->flags |= CSS_DYING;
+
/*
* This must happen before css is disassociated with its cgroup.
* See seq_css() for details.
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index f6501f4..ae64341 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -176,9 +176,9 @@
} cpuset_flagbits_t;
/* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
{
- return test_bit(CS_ONLINE, &cs->flags);
+ return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}
static inline int is_cpu_exclusive(const struct cpuset *cs)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9ae6fbe..cb51034 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1658,13 +1658,13 @@
ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
- return ret;
+ goto out;
if (st->state < target)
ret = do_cpu_up(dev->id, target);
else
ret = do_cpu_down(dev->id, target);
-
+out:
unlock_device_hotplug();
return ret ? ret : count;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6e75a5c..6c4e523 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7316,6 +7316,21 @@
return __perf_event_account_interrupt(event, 1);
}
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+ /*
+ * Due to interrupt latency (AKA "skid"), we may enter the
+ * kernel before taking an overflow, even if the PMU is only
+ * counting user events.
+ * To avoid leaking information to userspace, we must always
+ * reject kernel samples when exclude_kernel is set.
+ */
+ if (event->attr.exclude_kernel && !user_mode(regs))
+ return false;
+
+ return true;
+}
+
/*
* Generic event overflow handling, sampling.
*/
@@ -7337,6 +7352,12 @@
ret = __perf_event_account_interrupt(event, throttle);
/*
+ * For security, drop the skid kernel samples if necessary.
+ */
+ if (!sample_is_allowed(event, regs))
+ return ret;
+
+ /*
* XXX event_limit might not quite work as expected on inherited
* events
*/
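
The new sample_is_allowed() check above reduces to a single predicate: reject the sample when the event excludes the kernel but the overflow landed in kernel mode. A tiny userspace illustration; fake_event and fake_regs are stand-ins for the real perf structures:

  #include <stdio.h>
  #include <stdbool.h>

  struct fake_event { bool exclude_kernel; };
  struct fake_regs  { bool user_mode; };      /* stand-in for user_mode(regs) */

  static bool sample_is_allowed(const struct fake_event *event,
                                const struct fake_regs *regs)
  {
          /* A counter restricted to user space must never report a sample
           * whose register state points into the kernel (interrupt skid). */
          if (event->exclude_kernel && !regs->user_mode)
                  return false;
          return true;
  }

  int main(void)
  {
          struct fake_event ev = { .exclude_kernel = true };
          struct fake_regs  in_kernel = { .user_mode = false };

          printf("%s\n", sample_is_allowed(&ev, &in_kernel) ? "keep" : "drop");
          return 0;
  }
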
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 070be98..425170d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1312,8 +1312,10 @@
ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK);
- if (ret)
+ if (ret) {
+ irq_release_resources(desc);
goto out_mask;
+ }
}
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d3..c7209f0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,7 @@
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
- pm_wakeup_clear(true);
+ pm_wakeup_clear();
pr_info("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c0248c7..15e6bae 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -72,8 +72,6 @@
static void freeze_enter(void)
{
- trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
spin_lock_irq(&suspend_freeze_lock);
if (pm_wakeup_pending())
goto out;
@@ -100,27 +98,6 @@
out:
suspend_freeze_state = FREEZE_STATE_NONE;
spin_unlock_irq(&suspend_freeze_lock);
-
- trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
-}
-
-static void s2idle_loop(void)
-{
- do {
- freeze_enter();
-
- if (freeze_ops && freeze_ops->wake)
- freeze_ops->wake();
-
- dpm_resume_noirq(PMSG_RESUME);
- if (freeze_ops && freeze_ops->sync)
- freeze_ops->sync();
-
- if (pm_wakeup_pending())
- break;
-
- pm_wakeup_clear(false);
- } while (!dpm_suspend_noirq(PMSG_SUSPEND));
}
void freeze_wake(void)
@@ -394,8 +371,10 @@
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
- s2idle_loop();
- goto Platform_early_resume;
+ trace_suspend_resume(TPS("machine_suspend"), state, true);
+ freeze_enter();
+ trace_suspend_resume(TPS("machine_suspend"), state, false);
+ goto Platform_wake;
}
error = disable_nonboot_cpus();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a1aecf4..a1db38a 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -269,7 +269,6 @@
#define MAX_CMDLINECONSOLES 8
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
static int preferred_console = -1;
int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@
* See if this tty is not yet registered, and
* if we have a slot free.
*/
- for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
if (strcmp(c->name, name) == 0 && c->index == idx) {
- if (brl_options)
- return 0;
-
- /*
- * Maintain an invariant that will help to find if
- * the matching console is preferred, see
- * register_console():
- *
- * The last non-braille console is always
- * the preferred one.
- */
- if (i != console_cmdline_cnt - 1)
- swap(console_cmdline[i],
- console_cmdline[console_cmdline_cnt - 1]);
-
- preferred_console = console_cmdline_cnt - 1;
-
+ if (!brl_options)
+ preferred_console = i;
return 0;
}
}
@@ -1937,7 +1923,6 @@
braille_set_options(c, brl_options);
c->index = idx;
- console_cmdline_cnt++;
return 0;
}
/*
@@ -2477,23 +2462,12 @@
}
/*
- * See if this console matches one we selected on the command line.
- *
- * There may be several entries in the console_cmdline array matching
- * with the same console, one with newcon->match(), another by
- * name/index:
- *
- * pl011,mmio,0x87e024000000,115200 -- added from SPCR
- * ttyAMA0 -- added from command line
- *
- * Traverse the console_cmdline array in reverse order to be
- * sure that if this console is preferred then it will be the first
- * matching entry. We use the invariant that is maintained in
- * __add_preferred_console().
+ * See if this console matches one we selected on
+ * the command line.
*/
- for (i = console_cmdline_cnt - 1; i >= 0; i--) {
- c = console_cmdline + i;
-
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
if (!newcon->match ||
newcon->match(newcon, c->name, c->index, c->options) != 0) {
/* default matching */
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 584d8a9..dea0361 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -263,7 +263,7 @@
/*
* Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
+ * srcu_struct.
* Returns an index that must be passed to the matching srcu_read_unlock().
*/
int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@
int idx;
idx = READ_ONCE(sp->completed) & 0x1;
- __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
+ this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
smp_mb(); /* B */ /* Avoid leaking the critical section. */
return idx;
}
@@ -281,7 +281,6 @@
* Removes the count for the old reader from the appropriate per-CPU
* element of the srcu_struct. Note that this may well be a different
* CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
*/
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 36e1f82..32798eb 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -97,8 +97,9 @@
/*
* Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
+ * srcu_struct. Can be invoked from irq/bh handlers, but the matching
+ * __srcu_read_unlock() must be in the same handler instance. Returns an
+ * index that must be passed to the matching srcu_read_unlock().
*/
int __srcu_read_lock(struct srcu_struct *sp)
{
@@ -112,7 +113,7 @@
/*
* Removes the count for the old reader from the appropriate element of
- * the srcu_struct. Must be called from process context.
+ * the srcu_struct.
*/
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3ae8474..157654f 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -357,7 +357,7 @@
/*
* Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
+ * srcu_struct.
* Returns an index that must be passed to the matching srcu_read_unlock().
*/
int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@
int idx;
idx = READ_ONCE(sp->srcu_idx) & 0x1;
- __this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+ this_cpu_inc(sp->sda->srcu_lock_count[idx]);
smp_mb(); /* B */ /* Avoid leaking the critical section. */
return idx;
}
@@ -375,7 +375,6 @@
* Removes the count for the old reader from the appropriate per-CPU
* element of the srcu_struct. Note that this may well be a different
* CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
*/
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 803c3bc..326d4f8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5605,7 +5605,7 @@
BUG_ON(cpu_online(smp_processor_id()));
if (mm != &init_mm) {
- switch_mm_irqs_off(mm, &init_mm, current);
+ switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
mmdrop(mm);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 622eed1..076a2e3 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -101,9 +101,6 @@
if (sg_policy->next_freq == next_freq)
return;
- if (sg_policy->next_freq > next_freq)
- next_freq = (sg_policy->next_freq + next_freq) >> 1;
-
sg_policy->next_freq = next_freq;
sg_policy->last_freq_update_time = time;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d711093..c77e4b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3563,7 +3563,7 @@
trace_sched_stat_runtime_enabled()) {
printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
"stat_blocked and stat_runtime require the "
- "kernel parameter schedstats=enabled or "
+ "kernel parameter schedstats=enable or "
"kernel.sched_schedstats=1\n");
}
#endif
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 987e496..b398c2e 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -37,9 +37,11 @@
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
#ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
+static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif
@@ -867,7 +869,7 @@
/**
* tick_broadcast_setup_oneshot - setup the broadcast device
*/
-void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
int cpu = smp_processor_id();
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f738251..be0ac01 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -126,7 +126,6 @@
/* Functions related to oneshot broadcasting */
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
-extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
extern void tick_broadcast_switch_to_oneshot(void);
extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
extern int tick_broadcast_oneshot_active(void);
@@ -134,7 +133,6 @@
bool tick_broadcast_oneshot_available(void);
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#else /* !(BROADCAST && ONESHOT): */
-static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9652bc5..b602c48 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -118,6 +118,26 @@
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+ struct clocksource *clock = READ_ONCE(tkr->clock);
+
+ return clock->read(clock);
+}
+
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
@@ -175,7 +195,7 @@
*/
do {
seq = read_seqcount_begin(&tk_core.seq);
- now = tkr->read(tkr->clock);
+ now = tk_clock_read(tkr);
last = tkr->cycle_last;
mask = tkr->mask;
max = tkr->clock->max_cycles;
@@ -209,7 +229,7 @@
u64 cycle_now, delta;
/* read clocksource */
- cycle_now = tkr->read(tkr->clock);
+ cycle_now = tk_clock_read(tkr);
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +258,10 @@
++tk->cs_was_changed_seq;
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
- tk->tkr_mono.read = clock->read;
tk->tkr_mono.mask = clock->mask;
- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
- tk->tkr_raw.read = clock->read;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
@@ -262,7 +280,7 @@
/* Go back from cycles -> shifted ns */
tk->xtime_interval = interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
- tk->raw_interval = (interval * clock->mult) >> clock->shift;
+ tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
@@ -404,7 +422,7 @@
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
- tkr->read(tkr->clock),
+ tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@
return cycles_at_suspend;
}
+static struct clocksource dummy_clock = {
+ .read = dummy_clock_read,
+};
+
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@
struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- cycles_at_suspend = tkr->read(tkr->clock);
- tkr_dummy.read = dummy_clock_read;
+ cycles_at_suspend = tk_clock_read(tkr);
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- tkr_dummy.read = dummy_clock_read;
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
@@ -649,11 +671,10 @@
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
- struct clocksource *clock = tk->tkr_mono.clock;
u64 cycle_now, delta;
u64 nsec;
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
@@ -929,8 +950,7 @@
do {
seq = read_seqcount_begin(&tk_core.seq);
-
- now = tk->tkr_mono.read(tk->tkr_mono.clock);
+ now = tk_clock_read(&tk->tkr_mono);
systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1128,7 @@
* Check whether the system counter value provided by the
* device driver is on the current timekeeping interval.
*/
- now = tk->tkr_mono.read(tk->tkr_mono.clock);
+ now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
if (!cycle_between(interval_start, cycles, now)) {
clock_was_set_seq = tk->clock_was_set_seq;
@@ -1629,7 +1649,7 @@
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
cycle_now > tk->tkr_mono.cycle_last) {
u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@
u32 shift, unsigned int *clock_set)
{
u64 interval = tk->cycle_interval << shift;
- u64 raw_nsecs;
+ u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
@@ -1991,14 +2011,15 @@
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
- raw_nsecs = (u64)tk->raw_interval << shift;
- raw_nsecs += tk->raw_time.tv_nsec;
- if (raw_nsecs >= NSEC_PER_SEC) {
- u64 raw_secs = raw_nsecs;
- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- tk->raw_time.tv_sec += raw_secs;
+ tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+ tk->raw_time.tv_sec++;
}
- tk->raw_time.tv_nsec = raw_nsecs;
+ tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif
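
To make the tk_clock_read() pattern above concrete: the clocksource pointer is snapshotted once, and both the function pointer and the argument passed to it come from that one snapshot, so a concurrent clocksource switch can never pair the wrong read() with the wrong clock. A rough userspace sketch, using the GCC/Clang __atomic_load_n() builtin in place of READ_ONCE():

  #include <stdio.h>

  struct clocksource {
          unsigned long long (*read)(struct clocksource *cs);
  };

  static unsigned long long fake_read(struct clocksource *cs)
  {
          (void)cs;
          return 42;                         /* stand-in for a hardware counter */
  }

  static struct clocksource fake_clock = { .read = fake_read };

  /* Shared pointer that an updater may swap at any time. */
  static struct clocksource *cur_clock = &fake_clock;

  static unsigned long long clock_read(void)
  {
          /* Snapshot the pointer once; the callee and its argument come from
           * the same snapshot, so they can never belong to different clocks. */
          struct clocksource *clock = __atomic_load_n(&cur_clock, __ATOMIC_RELAXED);

          return clock->read(clock);
  }

  int main(void)
  {
          printf("%llu\n", clock_read());
          return 0;
  }
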
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7..9f79547 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
int err;
shash->tfm = tfm;
@@ -53,7 +53,9 @@
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ ret = *ctx;
+ barrier_data(ctx);
+ return ret;
}
EXPORT_SYMBOL(crc32c);
diff --git a/mm/gup.c b/mm/gup.c
index b3c7214..576c4df 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -387,11 +387,6 @@
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
- /* For mm_populate(), just skip the stack guard page. */
- if ((*flags & FOLL_POPULATE) &&
- (stack_guard_page_start(vma, address) ||
- stack_guard_page_end(vma, address + PAGE_SIZE)))
- return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a84909c..88c6167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,8 +1426,11 @@
*/
if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
page = pmd_page(*vmf->pmd);
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(vmf->ptl);
wait_on_page_locked(page);
+ put_page(page);
goto out;
}
@@ -1459,9 +1462,12 @@
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
+ page_nid = -1;
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(vmf->ptl);
wait_on_page_locked(page);
- page_nid = -1;
+ put_page(page);
goto out;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 342fac9..ecc183f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1184,7 +1184,10 @@
* page_remove_rmap() in try_to_unmap_one(). So to determine page status
* correctly, we save a copy of the page flags at this time.
*/
- page_flags = p->flags;
+ if (PageHuge(p))
+ page_flags = hpage->flags;
+ else
+ page_flags = p->flags;
/*
* unpoison always clear PG_hwpoison inside page lock
diff --git a/mm/memory.c b/mm/memory.c
index 2e65df1..bb11c47 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2855,40 +2855,6 @@
}
/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
-/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2904,10 +2870,6 @@
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, vmf->address) < 0)
- return VM_FAULT_SIGSEGV;
-
/*
* Use pte_alloc() instead of pte_alloc_map(). We can't run
* pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/mmap.c b/mm/mmap.c
index f82741e..8e07976 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
LIST_HEAD(uf);
@@ -229,7 +230,8 @@
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
@@ -253,10 +255,22 @@
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -352,7 +366,7 @@
anon_vma_unlock_read(anon_vma);
}
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
@@ -541,7 +555,7 @@
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -856,7 +870,7 @@
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
@@ -941,7 +955,7 @@
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
- VM_WARN_ON(mm->highest_vm_end != end);
+ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
@@ -1787,7 +1801,7 @@
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
@@ -1798,7 +1812,7 @@
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
@@ -1825,8 +1839,8 @@
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
@@ -1890,7 +1904,7 @@
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
@@ -1903,7 +1917,7 @@
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit && gap_end - gap_start >= length)
@@ -1929,7 +1943,7 @@
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
@@ -1967,7 +1981,7 @@
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
@@ -1978,9 +1992,10 @@
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2003,7 +2018,7 @@
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -2018,9 +2033,10 @@
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2155,21 +2171,19 @@
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2207,17 +2221,30 @@
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
/* Guard against wrapping around to address 0. */
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else
+ address &= PAGE_MASK;
+ address += PAGE_SIZE;
+ if (!address)
return -ENOMEM;
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+ if (gap_addr < address)
+ return -ENOMEM;
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2261,7 +2288,7 @@
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = address;
+ mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2282,6 +2309,8 @@
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
address &= PAGE_MASK;
@@ -2289,6 +2318,17 @@
if (error)
return error;
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2343,28 +2383,25 @@
return error;
}
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
@@ -2386,14 +2423,6 @@
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
@@ -2491,7 +2520,7 @@
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
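
For reference, the new stack_guard_gap= parsing above amounts to reading a decimal page count and shifting it into bytes. A small userspace approximation; PAGE_SHIFT of 12 is an assumption of the sketch, not something the patch fixes:

  #include <stdio.h>
  #include <stdlib.h>

  #define PAGE_SHIFT 12                       /* assumed for this sketch */

  static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;   /* default */

  static void parse_stack_guard_gap(const char *p)
  {
          char *endptr;
          unsigned long val = strtoul(p, &endptr, 10);

          if (*endptr == '\0')                 /* accept only a plain decimal number */
                  stack_guard_gap = val << PAGE_SHIFT;
  }

  int main(void)
  {
          parse_stack_guard_gap("1024");
          printf("stack_guard_gap = %lu bytes\n", stack_guard_gap);
          return 0;
  }
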
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index ac6318a..3405b4e 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@
if (!page)
goto not_enough_page;
ctrl->map[idx] = page;
+
+ if (!(idx % SWAP_CLUSTER_MAX))
+ cond_resched();
}
return 0;
not_enough_page:
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 6063581..ce0618b 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -115,9 +115,9 @@
unsigned long pressure = 0;
/*
- * reclaimed can be greater than scanned in cases
- * like THP, where the scanned is 1 and reclaimed
- * could be 512
+ * reclaimed can be greater than scanned for things such as reclaimed
+ * slab pages. shrink_node() just adds reclaimed pages without a
+ * related increment to scanned pages.
*/
if (reclaimed >= scanned)
goto out;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 953b672..abc5f40 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -813,7 +813,6 @@
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
- free_netdev(dev);
}
void vlan_setup(struct net_device *dev)
@@ -826,7 +825,8 @@
netif_keep_dst(dev);
dev->netdev_ops = &vlan_netdev_ops;
- dev->destructor = vlan_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
dev->min_mtu = 0;
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 013e970..000ca2f 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1064,8 +1064,9 @@
skb_new->protocol = eth_type_trans(skb_new, soft_iface);
- soft_iface->stats.rx_packets++;
- soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN + hdr_size);
netif_rx(skb_new);
batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index e1ebe14..ae9f4d3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -987,7 +987,7 @@
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
orig_addr_gw);
- return NET_RX_DROP;
+ goto free_skb;
}
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b25789a..10f7edf 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1034,8 +1034,6 @@
* netdev and its private data (bat_priv)
*/
rcu_barrier();
-
- free_netdev(dev);
}
/**
@@ -1047,7 +1045,8 @@
ether_setup(dev);
dev->netdev_ops = &batadv_netdev_ops;
- dev->destructor = batadv_softif_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = batadv_softif_free;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
dev->priv_flags |= IFF_NO_QUEUE;
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 6089599..ab3b654 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -598,7 +598,7 @@
dev->netdev_ops = &netdev_ops;
dev->header_ops = &header_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
static struct device_type bt_type = {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 430b53e..f0f3447 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -379,7 +379,7 @@
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 574f788..32bd3ea 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@
err = 0;
switch (nla_type(attr)) {
case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
- if (!(p->flags & BR_VLAN_TUNNEL))
+ if (!p || !(p->flags & BR_VLAN_TUNNEL))
return -EINVAL;
err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
if (err)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0db8102..6f12a52 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,7 +179,8 @@
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
- mod_timer(&br->hello_timer, jiffies + br->hello_time);
+ if (br->dev->flags & IFF_UP)
+ mod_timer(&br->hello_timer, jiffies + br->hello_time);
br_port_state_selection(br);
}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index adcad34..21f18ea 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -754,6 +754,10 @@
lock_sock(sk);
+ err = -EINVAL;
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ goto out;
+
err = -EAFNOSUPPORT;
if (uaddr->sa_family != AF_CAIF)
goto out;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fc..71b6ab2 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@
{
struct sk_buff *skb;
- if (likely(in_interrupt()))
- skb = alloc_skb(len + pfx, GFP_ATOMIC);
- else
- skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
if (unlikely(skb == NULL))
return NULL;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 1816fc9..fe3c53e 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -392,14 +392,14 @@
{
struct chnl_net *priv = netdev_priv(dev);
caif_free_client(&priv->chnl);
- free_netdev(dev);
}
static void ipcaif_net_setup(struct net_device *dev)
{
struct chnl_net *priv;
dev->netdev_ops = &netdev_ops;
- dev->destructor = chnl_net_destructor;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = chnl_net_destructor;
dev->flags |= IFF_NOARP;
dev->flags |= IFF_POINTOPOINT;
dev->mtu = GPRS_PDP_MTU;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index b6406fe..88edac0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -872,8 +872,7 @@
static int can_pernet_init(struct net *net)
{
- net->can.can_rcvlists_lock =
- __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
+ spin_lock_init(&net->can.can_rcvlists_lock);
net->can.can_rx_alldev_list =
kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
diff --git a/net/core/dev.c b/net/core/dev.c
index fca407b..6d60149 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1253,8 +1253,9 @@
if (!new_ifalias)
return -ENOMEM;
dev->ifalias = new_ifalias;
+ memcpy(dev->ifalias, alias, len);
+ dev->ifalias[len] = 0;
- strlcpy(dev->ifalias, alias, len+1);
return len;
}
@@ -4948,6 +4949,19 @@
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+ while (remsd) {
+ struct softnet_data *next = remsd->rps_ipi_next;
+
+ if (cpu_online(remsd->cpu))
+ smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ remsd = next;
+ }
+#endif
+}
+
/*
* net_rps_action_and_irq_enable sends any pending IPI's for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
@@ -4963,14 +4977,7 @@
local_irq_enable();
/* Send pending IPI's to kick RPS processing on remote cpus. */
- while (remsd) {
- struct softnet_data *next = remsd->rps_ipi_next;
-
- if (cpu_online(remsd->cpu))
- smp_call_function_single_async(remsd->cpu,
- &remsd->csd);
- remsd = next;
- }
+ net_rps_send_ipi(remsd);
} else
#endif
local_irq_enable();
@@ -7501,6 +7508,8 @@
err_uninit:
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
goto out;
}
EXPORT_SYMBOL(register_netdevice);
@@ -7708,8 +7717,10 @@
WARN_ON(rcu_access_pointer(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
- if (dev->destructor)
- dev->destructor(dev);
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+ if (dev->needs_free_netdev)
+ free_netdev(dev);
/* Report a network device has been unregistered */
rtnl_lock();
@@ -8192,7 +8203,7 @@
struct sk_buff **list_skb;
struct sk_buff *skb;
unsigned int cpu;
- struct softnet_data *sd, *oldsd;
+ struct softnet_data *sd, *oldsd, *remsd = NULL;
local_irq_disable();
cpu = smp_processor_id();
@@ -8233,6 +8244,13 @@
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+#ifdef CONFIG_RPS
+ remsd = oldsd->rps_ipi_list;
+ oldsd->rps_ipi_list = NULL;
+#endif
+ /* send out pending IPI's on offline CPU */
+ net_rps_send_ipi(remsd);
+
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
netif_rx_ni(skb);
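
A rough sketch of the teardown order the destructor split above establishes: driver-private cleanup runs via priv_destructor() first, then free_netdev() is called only when needs_free_netdev is set. Illustration only; the struct and helpers below are simplified stand-ins for the real netdev code:

  #include <stdio.h>
  #include <stdlib.h>
  #include <stdbool.h>

  struct net_device {
          bool needs_free_netdev;
          void (*priv_destructor)(struct net_device *dev);
  };

  static void free_netdev(struct net_device *dev)
  {
          printf("free_netdev()\n");
          free(dev);
  }

  static void my_priv_destructor(struct net_device *dev)
  {
          (void)dev;
          printf("priv_destructor(): release driver-private state only\n");
  }

  /* Teardown order on unregister: private cleanup first, then the free. */
  static void unregister_teardown(struct net_device *dev)
  {
          if (dev->priv_destructor)
                  dev->priv_destructor(dev);
          if (dev->needs_free_netdev)
                  free_netdev(dev);
  }

  int main(void)
  {
          struct net_device *dev = calloc(1, sizeof(*dev));

          if (!dev)
                  return 1;
          dev->needs_free_netdev = true;
          dev->priv_destructor = my_priv_destructor;
          unregister_teardown(dev);
          return 0;
  }
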
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a2..a0adfc3 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@
hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
&devlink_nl_family, NLM_F_MULTI, cmd);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (devlink_nl_put_handle(skb, devlink))
goto nla_put_failure;
@@ -2098,8 +2100,10 @@
hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
&devlink_nl_family, NLM_F_MULTI, cmd);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (devlink_nl_put_handle(skb, devlink))
goto nla_put_failure;
diff --git a/net/core/dst.c b/net/core/dst.c
index 6192f11..13ba4a0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -469,6 +469,20 @@
spin_lock_bh(&dst_garbage.lock);
dst = dst_garbage.list;
dst_garbage.list = NULL;
+ /* The code in dst_ifdown places a hold on the loopback device.
+ * If the gc entry processing is set to expire after a lengthy
+ * interval, this hold can cause netdev_wait_allrefs() to hang
+ * out and wait for a long time -- until the loopback
+ * interface is released. If we're really unlucky, it'll emit
+ * pr_emerg messages to console too. Reset the interval here,
+ * so dst cleanups occur in a more timely fashion.
+ */
+ if (dst_garbage.timer_inc > DST_GC_INC) {
+ dst_garbage.timer_inc = DST_GC_INC;
+ dst_garbage.timer_expires = DST_GC_MIN;
+ mod_delayed_work(system_wq, &dst_gc_work,
+ dst_garbage.timer_expires);
+ }
spin_unlock_bh(&dst_garbage.lock);
if (last)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9e2c0a7..5e61456 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1124,6 +1124,8 @@
struct ifla_vf_mac vf_mac;
struct ifla_vf_info ivi;
+ memset(&ivi, 0, sizeof(ivi));
+
/* Not all SR-IOV capable drivers support the
* spoofcheck and "RSS query enable" query. Preset to
* -1 so the user space tool can detect that the driver
@@ -1132,7 +1134,6 @@
ivi.spoofchk = -1;
ivi.rss_query_en = -1;
ivi.trusted = -1;
- memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
*/
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e8..b1be7c0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@
spin_lock_irqsave(&q->lock, flags);
skb = __skb_dequeue(q);
- if (skb && (skb_next = skb_peek(q)))
+ if (skb && (skb_next = skb_peek(q))) {
icmp_next = is_icmp_err_skb(skb_next);
+ if (icmp_next)
+ sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+ }
spin_unlock_irqrestore(&q->lock, flags);
if (is_icmp_err_skb(skb) && !icmp_next)
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1ed81ac..aa8ffec 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+ if (skb->len < sizeof(*nlh) ||
+ nlh->nlmsg_len < sizeof(*nlh) ||
+ skb->len < nlh->nlmsg_len)
return;
if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae..90038d4 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ int i, ret = 0;
+
+ /* Suspend slave network devices */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_port_initialized(ds, i))
+ continue;
+
+ ret = dsa_slave_suspend(ds->ports[i].netdev);
+ if (ret)
+ return ret;
+ }
+
+ if (ds->ops->suspend)
+ ret = ds->ops->suspend(ds);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+ int i, ret = 0;
+
+ if (ds->ops->resume)
+ ret = ds->ops->resume(ds);
+
+ if (ret)
+ return ret;
+
+ /* Resume slave network devices */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_port_initialized(ds, i))
+ continue;
+
+ ret = dsa_slave_resume(ds->ports[i].netdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
static struct packet_type dsa_pack_type __read_mostly = {
.type = cpu_to_be16(ETH_P_XDSA),
.func = dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bf..7796580 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@
dsa_ds_unapply(dst, ds);
}
- if (dst->cpu_switch)
+ if (dst->cpu_switch) {
dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+ dst->cpu_switch = NULL;
+ }
pr_info("DSA: tree %d unapplied\n", dst->tree);
dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8..7281098 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@
dsa_switch_unregister_notifier(ds);
}
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
- int i, ret = 0;
-
- /* Suspend slave network devices */
- for (i = 0; i < ds->num_ports; i++) {
- if (!dsa_is_port_initialized(ds, i))
- continue;
-
- ret = dsa_slave_suspend(ds->ports[i].netdev);
- if (ret)
- return ret;
- }
-
- if (ds->ops->suspend)
- ret = ds->ops->suspend(ds);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
- int i, ret = 0;
-
- if (ds->ops->resume)
- ret = ds->ops->resume(ds);
-
- if (ret)
- return ret;
-
- /* Resume slave network devices */
- for (i = 0; i < ds->num_ports; i++) {
- if (!dsa_is_port_initialized(ds, i))
- continue;
-
- ret = dsa_slave_resume(ds->ports[i].netdev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
/* platform driver init and cleanup *****************************************/
static int dev_is_class(struct device *dev, void *class)
{
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c73160f..0a0a392 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -378,7 +378,6 @@
del_timer_sync(&hsr->announce_timer);
synchronize_rcu();
- free_netdev(hsr_dev);
}
static const struct net_device_ops hsr_device_ops = {
@@ -404,7 +403,8 @@
SET_NETDEV_DEVTYPE(dev, &hsr_type);
dev->priv_flags |= IFF_NO_QUEUE;
- dev->destructor = hsr_dev_destroy;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = hsr_dev_destroy;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
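
This hunk is one instance of a conversion repeated throughout the series (ip_tunnel, ip6_gre, ip6_tnl, vti6, sit, mac80211, openvswitch, ...): the old dev->destructor is split into dev->priv_destructor for driver-private teardown, with dev->needs_free_netdev telling the core to call free_netdev() itself. A minimal sketch of the new shape, using a hypothetical driver "foo" (foo_netdev_ops and the use of dev->tstats are assumptions for illustration):

    static void foo_dev_free(struct net_device *dev)
    {
            /* Driver-private cleanup only; no free_netdev() here --
             * the core frees the device because needs_free_netdev is set.
             */
            free_percpu(dev->tstats);
    }

    static void foo_setup(struct net_device *dev)
    {
            dev->netdev_ops = &foo_netdev_ops;      /* hypothetical ops table */
            dev->needs_free_netdev = true;          /* core calls free_netdev() */
            dev->priv_destructor = foo_dev_free;    /* runs before that free */
    }

Error paths that used to call the old destructor directly now call free_netdev() instead, as the ip6_gre, ip6_tnl and sit hunks below show.
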
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa..04b5450 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@
unsigned long irqflags;
frame->is_supervision = is_supervision_frame(port->hsr, skb);
- frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
- frame->is_supervision);
+ frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
if (frame->node_src == NULL)
return -1; /* Unknown node and !is_supervision, or no mem */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea9258..284a9b8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@
/* Get the hsr_node from which 'skb' was sent.
*/
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
bool is_sup)
{
+ struct list_head *node_db = &port->hsr->node_db;
struct hsr_node *node;
struct ethhdr *ethhdr;
u16 seq_out;
@@ -186,7 +187,11 @@
*/
seq_out = hsr_get_skb_sequence_nr(skb) - 1;
} else {
- WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+ /* This is also called for frames from the master port, so
+ * warn only for non-master ports
+ */
+ if (port->type != HSR_PT_MASTER)
+ WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
seq_out = HSR_SEQNR_START;
}
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f..4e04f0e 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
bool is_sup);
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
struct hsr_port *port);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0..0a866f33 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -107,7 +107,7 @@
ldev->netdev_ops = &lowpan_netdev_ops;
ldev->header_ops = &lowpan_header_ops;
- ldev->destructor = free_netdev;
+ ldev->needs_free_netdev = true;
ldev->features |= NETIF_F_NETNS_LOCAL;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad16..58925b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMP,
.prot = &ping_prot,
- .ops = &inet_dgram_ops,
+ .ops = &inet_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
},
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 43318b5..9144fa7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -657,8 +657,12 @@
/* Needed by both icmp_global_allow and icmp_xmit_lock */
local_bh_disable();
- /* Check global sysctl_icmp_msgs_per_sec ratelimit */
- if (!icmpv4_global_allow(net, type, code))
+ /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless the
+ * incoming dev is loopback. If the outgoing dev changes to a
+ * non-loopback one, the peer ratelimit still applies (in icmpv4_xrlim_allow)
+ */
+ if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+ !icmpv4_global_allow(net, type, code))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 44fd86d..8f6b5bb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2071,21 +2071,26 @@
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
- struct ip_sf_list *psf, *nextpsf;
+ struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
- for (psf = pmc->tomb; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ spin_lock_bh(&pmc->lock);
+ tomb = pmc->tomb;
pmc->tomb = NULL;
- for (psf = pmc->sources; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ sources = pmc->sources;
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
+ spin_unlock_bh(&pmc->lock);
+
+ for (psf = tomb; psf; psf = nextpsf) {
+ nextpsf = psf->sf_next;
+ kfree(psf);
+ }
+ for (psf = sources; psf; psf = nextpsf) {
+ nextpsf = psf->sf_next;
+ kfree(psf);
+ }
}
/* Join a multicast group
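
The rewritten ip_mc_clear_src() detaches both source lists while holding pmc->lock and frees the entries only after the lock is dropped, so kfree() never runs under the spinlock and concurrent walkers cannot see a half-freed list. A minimal sketch of that detach-then-free pattern, with a hypothetical node type standing in for struct ip_sf_list:

    struct node {                   /* hypothetical stand-in */
            struct node *sf_next;
    };

    static void clear_list(spinlock_t *lock, struct node **head)
    {
            struct node *list, *next;

            spin_lock_bh(lock);
            list = *head;           /* detach under the lock */
            *head = NULL;
            spin_unlock_bh(lock);

            while (list) {          /* free outside the lock */
                    next = list->sf_next;
                    kfree(list);
                    list = next;
            }
    }
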
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b878ecb..b436d07 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -967,7 +967,6 @@
gro_cells_destroy(&tunnel->gro_cells);
dst_cache_destroy(&tunnel->dst_cache);
free_percpu(dev->tstats);
- free_netdev(dev);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1155,7 +1154,8 @@
struct iphdr *iph = &tunnel->parms.iph;
int err;
- dev->destructor = ip_tunnel_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip_tunnel_dev_free;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 551de4d..8ae425c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -101,8 +101,8 @@
static void ipmr_free_table(struct mr_table *mrt);
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local);
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -501,7 +501,7 @@
dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->features |= NETIF_F_NETNS_LOCAL;
}
@@ -988,7 +988,7 @@
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
} else {
- ip_mr_forward(net, mrt, skb, c, 0);
+ ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
}
}
}
@@ -1073,7 +1073,7 @@
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct net_device *dev)
{
const struct iphdr *iph = ip_hdr(skb);
struct mfc_cache *c;
@@ -1130,6 +1130,10 @@
kfree_skb(skb);
err = -ENOBUFS;
} else {
+ if (dev) {
+ skb->dev = dev;
+ skb->skb_iif = dev->ifindex;
+ }
skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
err = 0;
}
@@ -1828,10 +1832,10 @@
/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local)
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc_cache *cache, int local)
{
- int true_vifi = ipmr_find_vif(mrt, skb->dev);
+ int true_vifi = ipmr_find_vif(mrt, dev);
int psend = -1;
int vif, ct;
@@ -1853,13 +1857,7 @@
}
/* Wrong interface: drop packet and (maybe) send PIM assert. */
- if (mrt->vif_table[vif].dev != skb->dev) {
- struct net_device *mdev;
-
- mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
- if (mdev == skb->dev)
- goto forward;
-
+ if (mrt->vif_table[vif].dev != dev) {
if (rt_is_output_route(skb_rtable(skb))) {
/* It is our own packet, looped back.
* Very complicated situation...
@@ -2053,7 +2051,7 @@
read_lock(&mrt_lock);
vif = ipmr_find_vif(mrt, dev);
if (vif >= 0) {
- int err2 = ipmr_cache_unresolved(mrt, vif, skb);
+ int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
read_unlock(&mrt_lock);
return err2;
@@ -2064,7 +2062,7 @@
}
read_lock(&mrt_lock);
- ip_mr_forward(net, mrt, skb, cache, local);
+ ip_mr_forward(net, mrt, dev, skb, cache, local);
read_unlock(&mrt_lock);
if (local)
@@ -2238,7 +2236,7 @@
iph->saddr = saddr;
iph->daddr = daddr;
iph->version = 0;
- err = ipmr_cache_unresolved(mrt, vif, skb2);
+ err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
read_unlock(&mrt_lock);
rcu_read_unlock();
return err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59792d2..b5ea036 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2381,9 +2381,10 @@
return 0;
}
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
struct tcp_repair_opt __user *optbuf, unsigned int len)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct tcp_repair_opt opt;
while (len >= sizeof(opt)) {
@@ -2396,6 +2397,7 @@
switch (opt.opt_code) {
case TCPOPT_MSS:
tp->rx_opt.mss_clamp = opt.opt_val;
+ tcp_mtup_init(sk);
break;
case TCPOPT_WINDOW:
{
@@ -2555,7 +2557,7 @@
if (!tp->repair)
err = -EINVAL;
else if (sk->sk_state == TCP_ESTABLISHED)
- err = tcp_repair_options_est(tp,
+ err = tcp_repair_options_est(sk,
(struct tcp_repair_opt __user *)optval,
optlen);
else
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512..324c9bc 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ tcp_sk(sk)->prior_ssthresh = 0;
if (icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk);
if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de..8d772fe 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@
struct ipv6hdr *ip6_hdr;
struct ipv6_opt_hdr *hop;
unsigned char buf[CALIPSO_MAX_BUFFER];
- int len_delta, new_end, pad;
+ int len_delta, new_end, pad, payload;
unsigned int start, end;
ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@
if (ret_val < 0)
return ret_val;
+ ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
if (len_delta) {
if (len_delta > 0)
skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@
sizeof(*ip6_hdr) + start);
skb_reset_network_header(skb);
ip6_hdr = ipv6_hdr(skb);
+ payload = ntohs(ip6_hdr->payload_len);
+ ip6_hdr->payload_len = htons(payload + len_delta);
}
hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 230b5aa..8d7b113 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -491,7 +491,7 @@
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit */
- if (!icmpv6_global_allow(type))
+ if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
goto out_bh_enable;
mip6_addr_swap(skb);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 2fd5ca15..77f7f8c7 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -62,6 +62,7 @@
{
u32 *v = (u32 *)loc.v32;
+ __ila_hash_secret_init();
return jhash_2words(v[0], v[1], hashrnd);
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 0c5b4caa..64eea396 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -991,13 +991,13 @@
dst_cache_destroy(&t->dst_cache);
free_percpu(dev->tstats);
- free_netdev(dev);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ip6gre_netdev_ops;
- dev->destructor = ip6gre_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
dev->type = ARPHRD_IP6GRE;
@@ -1148,7 +1148,7 @@
return 0;
err_reg_dev:
- ip6gre_dev_free(ign->fb_tunnel_dev);
+ free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
return err;
}
@@ -1300,7 +1300,8 @@
ether_setup(dev);
dev->netdev_ops = &ip6gre_tap_netdev_ops;
- dev->destructor = ip6gre_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 280268f..cdb3728 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -116,8 +116,10 @@
if (udpfrag) {
int err = ip6_find_1stfragopt(skb, &prevhdr);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb_list(segs);
return ERR_PTR(err);
+ }
fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
fptr->frag_off = htons(offset);
if (skb->next)
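
The added kfree_skb_list() matters because ip6_find_1stfragopt() can fail only after skb_segment() has already built the whole segment chain; freeing nothing (or a single skb) would leak the rest of the list. A minimal sketch of the pattern, with fix_frag_headers() as a hypothetical stand-in for the follow-up step that may fail:

    static struct sk_buff *segment_and_fixup(struct sk_buff *skb,
                                             netdev_features_t features)
    {
            struct sk_buff *segs;
            int err;

            segs = skb_segment(skb, features);
            if (IS_ERR(segs))
                    return segs;

            err = fix_frag_headers(segs);   /* hypothetical, may fail */
            if (err < 0) {
                    /* Free the whole chained list, not a single skb. */
                    kfree_skb_list(segs);
                    return ERR_PTR(err);
            }
            return segs;
    }
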
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7ae6c50..c358197 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -254,7 +254,6 @@
gro_cells_destroy(&t->gro_cells);
dst_cache_destroy(&t->dst_cache);
free_percpu(dev->tstats);
- free_netdev(dev);
}
static int ip6_tnl_create2(struct net_device *dev)
@@ -322,7 +321,7 @@
return t;
failed_free:
- ip6_dev_free(dev);
+ free_netdev(dev);
failed:
return ERR_PTR(err);
}
@@ -1095,6 +1094,9 @@
if (!dst) {
route_lookup:
+ /* add dsfield to flowlabel for route lookup */
+ fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
dst = ip6_route_output(net, NULL, fl6);
if (dst->error)
@@ -1774,7 +1776,8 @@
static void ip6_tnl_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &ip6_tnl_netdev_ops;
- dev->destructor = ip6_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6_dev_free;
dev->type = ARPHRD_TUNNEL6;
dev->flags |= IFF_NOARP;
@@ -2221,7 +2224,7 @@
return 0;
err_register:
- ip6_dev_free(ip6n->fb_tnl_dev);
+ free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d67ef56..837ea1e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -180,7 +180,6 @@
static void vti6_dev_free(struct net_device *dev)
{
free_percpu(dev->tstats);
- free_netdev(dev);
}
static int vti6_tnl_create2(struct net_device *dev)
@@ -235,7 +234,7 @@
return t;
failed_free:
- vti6_dev_free(dev);
+ free_netdev(dev);
failed:
return NULL;
}
@@ -842,7 +841,8 @@
static void vti6_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &vti6_netdev_ops;
- dev->destructor = vti6_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = vti6_dev_free;
dev->type = ARPHRD_TUNNEL6;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
@@ -1100,7 +1100,7 @@
return 0;
err_register:
- vti6_dev_free(ip6n->fb_tnl_dev);
+ free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 374997d..2ecb39b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -733,7 +733,7 @@
dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->features |= NETIF_F_NETNS_LOCAL;
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa..ac826dd 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMPV6,
.prot = &pingv6_prot,
- .ops = &inet6_dgram_ops,
+ .ops = &inet6_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
};
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index cc8e3ae..e88bcb8 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -219,7 +219,7 @@
u64 buff64[SNMP_MIB_MAX];
int i;
- memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+ memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
for (i = 0; itemlist[i].name; i++)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9..60be012 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@
#endif /* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll. */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dc61b0b..7cebd95 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2804,6 +2804,7 @@
if ((rt->dst.dev == dev || !dev) &&
rt != adn->net->ipv6.ip6_null_entry &&
(rt->rt6i_nsiblings == 0 ||
+ (dev && netdev_unregistering(dev)) ||
!rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
return -1;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 61e5902..2378503 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -265,7 +265,7 @@
return nt;
failed_free:
- ipip6_dev_free(dev);
+ free_netdev(dev);
failed:
return NULL;
}
@@ -1336,7 +1336,6 @@
dst_cache_destroy(&tunnel->dst_cache);
free_percpu(dev->tstats);
- free_netdev(dev);
}
#define SIT_FEATURES (NETIF_F_SG | \
@@ -1351,7 +1350,8 @@
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->netdev_ops = &ipip6_netdev_ops;
- dev->destructor = ipip6_dev_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ipip6_dev_free;
dev->type = ARPHRD_SIT;
dev->hard_header_len = LL_MAX_HEADER + t_hlen;
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e01590..07d3657 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@
iph = ipv6_hdr(skb);
hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ if (hdr_len < 0)
+ return hdr_len;
skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f..9ad07a9 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@
skb_set_inner_transport_header(skb, skb_transport_offset(skb));
hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ if (hdr_len < 0)
+ return hdr_len;
skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 74d09f9..3be8528 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -65,7 +65,7 @@
ether_setup(dev);
dev->netdev_ops = &irlan_eth_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8b21af7..4de2ec9 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -114,12 +114,13 @@
{
struct l2tp_eth *priv = netdev_priv(dev);
- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
- stats->tx_packets = atomic_long_read(&priv->tx_packets);
- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
- stats->rx_packets = atomic_long_read(&priv->rx_packets);
- stats->rx_errors = atomic_long_read(&priv->rx_errors);
+ stats->tx_bytes = (unsigned long) atomic_long_read(&priv->tx_bytes);
+ stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
+ stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
+ stats->rx_bytes = (unsigned long) atomic_long_read(&priv->rx_bytes);
+ stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
+ stats->rx_errors = (unsigned long) atomic_long_read(&priv->rx_errors);
+
}
static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -141,7 +142,7 @@
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->features |= NETIF_F_LLTX;
dev->netdev_ops = &l2tp_eth_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62..cf2392b2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -741,7 +741,47 @@
ieee80211_agg_start_txq(sta, tid, true);
}
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+ struct tid_ampdu_tx *tid_tx)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+ return;
+
+ if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+ ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+ const u8 *ra, u16 tid, struct sta_info **sta)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ if (tid >= IEEE80211_NUM_TIDS) {
+ ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+ tid, IEEE80211_NUM_TIDS);
+ return NULL;
+ }
+
+ *sta = sta_info_get_bss(sdata, ra);
+ if (!*sta) {
+ ht_dbg(sdata, "Could not find station: %pM\n", ra);
+ return NULL;
+ }
+
+ tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
+
+ if (WARN_ON(!tid_tx))
+ ht_dbg(sdata, "addBA was not requested!\n");
+
+ return tid_tx;
+}
+
+void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
+ const u8 *ra, u16 tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
@@ -750,57 +790,15 @@
trace_api_start_tx_ba_cb(sdata, ra, tid);
- if (tid >= IEEE80211_NUM_TIDS) {
- ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
- tid, IEEE80211_NUM_TIDS);
- return;
- }
+ rcu_read_lock();
+ tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+ if (!tid_tx)
+ goto out;
- mutex_lock(&local->sta_mtx);
- sta = sta_info_get_bss(sdata, ra);
- if (!sta) {
- mutex_unlock(&local->sta_mtx);
- ht_dbg(sdata, "Could not find station: %pM\n", ra);
- return;
- }
-
- mutex_lock(&sta->ampdu_mlme.mtx);
- tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-
- if (WARN_ON(!tid_tx)) {
- ht_dbg(sdata, "addBA was not requested!\n");
- goto unlock;
- }
-
- if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
- goto unlock;
-
- if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
- ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
- mutex_unlock(&sta->ampdu_mlme.mtx);
- mutex_unlock(&local->sta_mtx);
-}
-
-void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
- const u8 *ra, u16 tid)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_ra_tid *ra_tid;
- struct sk_buff *skb = dev_alloc_skb(0);
-
- if (unlikely(!skb))
- return;
-
- ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- memcpy(&ra_tid->ra, ra, ETH_ALEN);
- ra_tid->tid = tid;
-
- skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
- skb_queue_tail(&sdata->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->work);
+ set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+ rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
@@ -860,37 +858,18 @@
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+ struct tid_ampdu_tx *tid_tx)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- struct tid_ampdu_tx *tid_tx;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
bool send_delba = false;
- trace_api_stop_tx_ba_cb(sdata, ra, tid);
+ ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+ sta->sta.addr, tid);
- if (tid >= IEEE80211_NUM_TIDS) {
- ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
- tid, IEEE80211_NUM_TIDS);
- return;
- }
-
- ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
- mutex_lock(&local->sta_mtx);
-
- sta = sta_info_get_bss(sdata, ra);
- if (!sta) {
- ht_dbg(sdata, "Could not find station: %pM\n", ra);
- goto unlock;
- }
-
- mutex_lock(&sta->ampdu_mlme.mtx);
spin_lock_bh(&sta->lock);
- tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
- if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
ht_dbg(sdata,
"unexpected callback to A-MPDU stop for %pM tid %d\n",
sta->sta.addr, tid);
@@ -906,12 +885,8 @@
spin_unlock_bh(&sta->lock);
if (send_delba)
- ieee80211_send_delba(sdata, ra, tid,
+ ieee80211_send_delba(sdata, sta->sta.addr, tid,
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
- mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
- mutex_unlock(&local->sta_mtx);
}
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_ra_tid *ra_tid;
- struct sk_buff *skb = dev_alloc_skb(0);
+ struct sta_info *sta;
+ struct tid_ampdu_tx *tid_tx;
- if (unlikely(!skb))
- return;
+ trace_api_stop_tx_ba_cb(sdata, ra, tid);
- ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- memcpy(&ra_tid->ra, ra, ETH_ALEN);
- ra_tid->tid = tid;
+ rcu_read_lock();
+ tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+ if (!tid_tx)
+ goto out;
- skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
- skb_queue_tail(&sdata->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->work);
+ set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+ rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
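
Both irqsafe callbacks now avoid the old trick of queueing a zero-size skb on sdata->skb_queue: they set a per-TID state bit and kick the existing ampdu_mlme work, and ieee80211_ba_session_work() (ht.c hunk below) tests and clears those bits in process context. A minimal sketch of that defer-via-flag-and-work pattern, using hypothetical names throughout:

    enum { MY_START_CB };           /* hypothetical state bit */

    struct my_session {
            unsigned long state;
            struct work_struct work;
    };

    static void my_cb_irqsafe(struct my_session *s)
    {
            set_bit(MY_START_CB, &s->state);        /* atomic, safe from IRQ */
            schedule_work(&s->work);                /* handle later, may sleep there */
    }

    static void my_work_fn(struct work_struct *work)
    {
            struct my_session *s = container_of(work, struct my_session, work);

            if (test_and_clear_bit(MY_START_CB, &s->state))
                    pr_debug("deferred start callback handled\n");
    }
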
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6c2e606..4a388fe 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -902,6 +902,8 @@
default:
return -EINVAL;
}
+ sdata->u.ap.req_smps = sdata->smps_mode;
+
sdata->needed_rx_chains = sdata->local->rx_chains;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a5287..6ca5442 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
+ * Copyright 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@
{
int i;
- cancel_work_sync(&sta->ampdu_mlme.work);
-
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
__ieee80211_stop_tx_ba_session(sta, i, reason);
__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@
reason != AGG_STOP_DESTROY_STA &&
reason != AGG_STOP_PEER_REQUEST);
}
+
+ /* stopping might queue the work again - so cancel only afterwards */
+ cancel_work_sync(&sta->ampdu_mlme.work);
}
void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@
spin_unlock_bh(&sta->lock);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
- if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
- &tid_tx->state))
+ if (!tid_tx)
+ continue;
+
+ if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+ ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+ if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
___ieee80211_stop_tx_ba_session(sta, tid,
AGG_STOP_LOCAL_REQUEST);
+ if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+ ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
}
mutex_unlock(&sta->ampdu_mlme.mtx);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c14..5e002f6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@
enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
- IEEE80211_SDATA_QUEUE_AGG_START = 1,
- IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
};
@@ -1427,12 +1425,6 @@
return local->hw.wiphy->bands[band];
}
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
- u8 ra[ETH_ALEN];
- u16 tid;
-};
-
/* this struct holds the value parsing from channel switch IE */
struct ieee80211_csa_ie {
struct cfg80211_chan_def chandef;
@@ -1539,7 +1531,7 @@
return true;
/* can't handle non-legacy preamble yet */
if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
- status->encoding != RX_ENC_LEGACY)
+ status->encoding == RX_ENC_LEGACY)
return true;
return false;
}
@@ -1794,8 +1786,10 @@
enum ieee80211_agg_stop_reason reason);
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+ struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+ struct tid_ampdu_tx *tid_tx);
void ieee80211_ba_session_work(struct work_struct *work);
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81..f5f5015 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1213,7 +1213,6 @@
static void ieee80211_if_free(struct net_device *dev)
{
free_percpu(dev->tstats);
- free_netdev(dev);
}
static void ieee80211_if_setup(struct net_device *dev)
@@ -1221,7 +1220,8 @@
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &ieee80211_dataif_ops;
- dev->destructor = ieee80211_if_free;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ieee80211_if_free;
}
static void ieee80211_if_setup_no_queue(struct net_device *dev)
@@ -1237,7 +1237,6 @@
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct sta_info *sta;
- struct ieee80211_ra_tid *ra_tid;
struct ieee80211_rx_agg *rx_agg;
if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@
while ((skb = skb_dequeue(&sdata->skb_queue))) {
struct ieee80211_mgmt *mgmt = (void *)skb->data;
- if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
- ra_tid = (void *)&skb->cb;
- ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
- ra_tid->tid);
- } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
- ra_tid = (void *)&skb->cb;
- ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
- ra_tid->tid);
- } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+ if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
rx_agg = (void *)&skb->cb;
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, rx_agg->addr);
@@ -1825,6 +1816,7 @@
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0) {
ieee80211_if_free(ndev);
+ free_netdev(ndev);
return ret;
}
@@ -1914,7 +1906,7 @@
ret = register_netdevice(ndev);
if (ret) {
- ieee80211_if_free(ndev);
+ free_netdev(ndev);
return ret;
}
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0ea9712..cc8e6ea 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -601,7 +601,7 @@
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
- u32 rate_flags, rates = 0;
+ u32 rates = 0;
sdata_assert_lock(sdata);
@@ -612,7 +612,6 @@
return;
}
chan = chanctx_conf->def.chan;
- rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
rcu_read_unlock();
sband = local->hw.wiphy->bands[chan->band];
shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -636,9 +635,6 @@
*/
rates_len = 0;
for (i = 0; i < sband->n_bitrates; i++) {
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
rates |= BIT(i);
rates_len++;
}
@@ -2818,7 +2814,7 @@
u32 *rates, u32 *basic_rates,
bool *have_higher_than_11mbit,
int *min_rate, int *min_rate_index,
- int shift, u32 rate_flags)
+ int shift)
{
int i, j;
@@ -2846,8 +2842,6 @@
int brate;
br = &sband->bitrates[j];
- if ((rate_flags & br->flags) != rate_flags)
- continue;
brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
if (brate == rate) {
@@ -4398,40 +4392,32 @@
return -ENOMEM;
}
- if (new_sta || override) {
- err = ieee80211_prep_channel(sdata, cbss);
- if (err) {
- if (new_sta)
- sta_info_free(local, new_sta);
- return -EINVAL;
- }
- }
-
+ /*
+ * Set up the information for the new channel before setting the
+ * new channel. We can't - completely race-free - change the basic
+ * rates bitmap and the channel (sband) that it refers to, but if
+ * we set it up before we at least avoid calling into the driver's
+ * bss_info_changed() method with invalid information (since we do
+ * call that from changing the channel - only for IDLE and perhaps
+ * some others, but ...).
+ *
+ * So to avoid that, just set up all the new information before the
+ * channel, but tell the driver to apply it only afterwards, since
+ * it might need the new channel for that.
+ */
if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
- struct ieee80211_chanctx_conf *chanctx_conf;
const struct cfg80211_bss_ies *ies;
int shift = ieee80211_vif_get_shift(&sdata->vif);
- u32 rate_flags;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- sta_info_free(local, new_sta);
- return -EINVAL;
- }
- rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
- rcu_read_unlock();
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
&rates, &basic_rates,
&have_higher_than_11mbit,
&min_rate, &min_rate_index,
- shift, rate_flags);
+ shift);
/*
* This used to be a workaround for basic rates missing
@@ -4489,8 +4475,22 @@
sdata->vif.bss_conf.sync_dtim_count = 0;
}
rcu_read_unlock();
+ }
- /* tell driver about BSSID, basic rates and timing */
+ if (new_sta || override) {
+ err = ieee80211_prep_channel(sdata, cbss);
+ if (err) {
+ if (new_sta)
+ sta_info_free(local, new_sta);
+ return -EINVAL;
+ }
+ }
+
+ if (new_sta) {
+ /*
+ * tell driver about BSSID, basic rates and timing
+ * this was set up above, before setting the channel
+ */
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1f75280..3674fe3 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1613,12 +1613,16 @@
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
+ !ieee80211_is_back_req(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- /* PM bit is only checked in frames where it isn't reserved,
+ /*
+ * PM bit is only checked in frames where it isn't reserved,
* in AP mode it's reserved in non-bufferable management frames
* (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+ * BAR frames should be ignored as specified in
+ * IEEE 802.11-2012 10.2.1.2.
*/
(!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a8..403e3cc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@
struct ieee80211_sta_rx_stats *cpurxs;
cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
- sinfo->rx_packets += cpurxs->dropped;
+ sinfo->rx_dropped_misc += cpurxs->dropped;
}
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cac..ea0747d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@
#define HT_AGG_STATE_STOPPING 3
#define HT_AGG_STATE_WANT_START 4
#define HT_AGG_STATE_WANT_STOP 5
+#define HT_AGG_STATE_START_CB 6
+#define HT_AGG_STATE_STOP_CB 7
enum ieee80211_agg_stop_reason {
AGG_STOP_DECLINED,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c1ef22d..cc19614 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include "ieee80211_i.h"
#include "michael.h"
@@ -153,7 +154,7 @@
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+ if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
goto mic_fail;
/* remove Michael MIC from payload */
@@ -1048,7 +1049,7 @@
bip_aad(skb, aad);
ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1098,7 +1099,7 @@
bip_aad(skb, aad);
ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1202,7 +1203,7 @@
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
- memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
return RX_DROP_UNUSABLE;
}
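
All of the MIC comparisons above move from memcmp() to crypto_memneq(), whose running time does not depend on where the buffers first differ, so an attacker cannot recover MIC bytes from response timing. A minimal sketch of the verification pattern (hypothetical helper and buffers):

    #include <crypto/algapi.h>

    /* Returns 0 when the MICs match. crypto_memneq() is constant-time,
     * unlike memcmp(), so the compare leaks nothing about the expected MIC.
     */
    static int verify_mic(const u8 *computed, const u8 *received, size_t len)
    {
            return crypto_memneq(computed, received, len) ? -EINVAL : 0;
    }
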
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 06019db..bd88a9b 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -526,8 +526,6 @@
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
mac802154_llsec_destroy(&sdata->sec);
-
- free_netdev(dev);
}
static void ieee802154_if_setup(struct net_device *dev)
@@ -593,7 +591,8 @@
sdata->dev->dev_addr);
sdata->dev->header_ops = &mac802154_header_ops;
- sdata->dev->destructor = mac802154_wpan_free;
+ sdata->dev->needs_free_netdev = true;
+ sdata->dev->priv_destructor = mac802154_wpan_free;
sdata->dev->netdev_ops = &mac802154_wpan_ops;
sdata->dev->ml_priv = &mac802154_mlme_wpan;
wpan_dev->promiscuous_mode = false;
@@ -608,7 +607,7 @@
break;
case NL802154_IFTYPE_MONITOR:
- sdata->dev->destructor = free_netdev;
+ sdata->dev->needs_free_netdev = true;
sdata->dev->netdev_ops = &mac802154_monitor_ops;
wpan_dev->promiscuous_mode = true;
break;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66..7b05fd1 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@
continue;
alive++;
nh_flags &= ~flags;
- WRITE_ONCE(nh->nh_flags, flags);
+ WRITE_ONCE(nh->nh_flags, nh_flags);
} endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9799a50..a8be9b7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -890,8 +890,13 @@
}
out:
local_bh_enable();
- if (last)
+ if (last) {
+ /* nf ct hash resize happened, now clear the leftover. */
+ if ((struct nf_conn *)cb->args[1] == last)
+ cb->args[1] = 0;
+
nf_ct_put(last);
+ }
while (i) {
i--;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d5..1c5b14a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@
u8 pf, unsigned int hooknum)
{
const struct sctphdr *sh;
- struct sctphdr _sctph;
const char *logmsg;
- sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
- if (!sh) {
+ if (skb->len < dataoff + sizeof(struct sctphdr)) {
logmsg = "nf_ct_sctp: short packet ";
goto out_invalid;
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
skb->ip_summed == CHECKSUM_NONE) {
+ if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+ logmsg = "nf_ct_sctp: failed to read header ";
+ goto out_invalid;
+ }
+ sh = (const struct sctphdr *)(skb->data + dataoff);
if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
logmsg = "nf_ct_sctp: bad CRC ";
goto out_invalid;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index ef0be32..6c72922 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -566,7 +566,7 @@
* Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
* will delete entry from already-freed table.
*/
- ct->status &= ~IPS_NAT_DONE_MASK;
+ clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
nf_nat_bysource_params);
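
The switch from "ct->status &= ~IPS_NAT_DONE_MASK" to clear_bit() replaces a non-atomic read-modify-write of the whole status word with an atomic clear of just the source-NAT bit, so concurrent bit updates elsewhere in ct->status are not clobbered. A minimal sketch of the difference, on a hypothetical flags word and bit index:

    static void clear_done_flag(unsigned long *status)
    {
            /* *status &= ~BIT(3) would load, mask and store the whole word;
             * a concurrent set_bit() on another bit between the load and
             * the store could be overwritten. clear_bit() touches only the
             * one bit, atomically.
             */
            clear_bit(3, status);   /* 3 is a hypothetical bit index */
    }
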
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb..fbdbaa0 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@
else if (d > 0)
p = &parent->rb_right;
else {
- if (nft_set_elem_active(&rbe->ext, genmask)) {
- if (nft_rbtree_interval_end(rbe) &&
- !nft_rbtree_interval_end(new))
- p = &parent->rb_left;
- else if (!nft_rbtree_interval_end(rbe) &&
- nft_rbtree_interval_end(new))
- p = &parent->rb_right;
- else {
- *ext = &rbe->ext;
- return -EEXIST;
- }
+ if (nft_rbtree_interval_end(rbe) &&
+ !nft_rbtree_interval_end(new)) {
+ p = &parent->rb_left;
+ } else if (!nft_rbtree_interval_end(rbe) &&
+ nft_rbtree_interval_end(new)) {
+ p = &parent->rb_right;
+ } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+ *ext = &rbe->ext;
+ return -EEXIST;
+ } else {
+ p = &parent->rb_left;
}
}
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f0..7586d44 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>
+#include <linux/net_namespace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -1415,7 +1416,8 @@
goto out;
}
NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
- NETLINK_CB(p->skb2).nsid_is_set = true;
+ if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+ NETLINK_CB(p->skb2).nsid_is_set = true;
val = netlink_broadcast_deliver(sk, p->skb2);
if (val < 0) {
netlink_overrun(sk);
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 89193a6..04a3128 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -94,7 +94,6 @@
struct vport *vport = ovs_internal_dev_get_vport(dev);
ovs_vport_free(vport);
- free_netdev(dev);
}
static void
@@ -156,7 +155,8 @@
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
- netdev->destructor = internal_dev_destructor;
+ netdev->needs_free_netdev = true;
+ netdev->priv_destructor = internal_dev_destructor;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
netdev->rtnl_link_ops = &internal_dev_link_ops;
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 21c28b5..2c93379 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -236,7 +236,7 @@
dev->tx_queue_len = 10;
dev->netdev_ops = &gprs_netdev_ops;
- dev->destructor = free_netdev;
+ dev->needs_free_netdev = true;
}
/*
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 164b5ac..7dc5892 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -94,8 +94,10 @@
k++;
}
- if (n)
+ if (n) {
+ err = -EINVAL;
goto err_out;
+ }
return keys_ex;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f42008b..b062bc8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -132,21 +132,21 @@
}
}
- spin_lock_bh(&police->tcf_lock);
if (est) {
err = gen_replace_estimator(&police->tcf_bstats, NULL,
&police->tcf_rate_est,
&police->tcf_lock,
NULL, est);
if (err)
- goto failure_unlock;
+ goto failure;
} else if (tb[TCA_POLICE_AVRATE] &&
(ret == ACT_P_CREATED ||
!gen_estimator_active(&police->tcf_rate_est))) {
err = -EINVAL;
- goto failure_unlock;
+ goto failure;
}
+ spin_lock_bh(&police->tcf_lock);
/* No failure allowed after this point */
police->tcfp_mtu = parm->mtu;
if (police->tcfp_mtu == 0) {
@@ -192,8 +192,6 @@
return ret;
-failure_unlock:
- spin_unlock_bh(&police->tcf_lock);
failure:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
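
The estimator setup is moved in front of spin_lock_bh(&police->tcf_lock), avoiding blocking calls under a BH spinlock (gen_replace_estimator() allocates and can sleep); with the setup done first, the failure_unlock label is no longer needed. A minimal sketch of the general shape, with hypothetical names:

    struct my_policer {             /* hypothetical */
            spinlock_t lock;
            bool configured;
    };

    static int my_policer_config(struct my_policer *p)
    {
            int err;

            /* May sleep (GFP_KERNEL allocations) -- do it before locking. */
            err = my_setup_estimator(p);    /* hypothetical helper */
            if (err)
                    return err;

            spin_lock_bh(&p->lock);
            /* No failures and no sleeping allowed from here on. */
            p->configured = true;
            spin_unlock_bh(&p->lock);
            return 0;
    }
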
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f16c8d9..30aa0a5 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4622,13 +4622,13 @@
for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
hash++, head++) {
- read_lock(&head->lock);
+ read_lock_bh(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
err = cb(sctp_ep(epb), p);
if (err)
break;
}
- read_unlock(&head->lock);
+ read_unlock_bh(&head->lock);
}
return err;
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 24fedd4..03f6b58 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -119,11 +119,9 @@
for (i = 0; i < (reqs << 1); i++) {
rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
- if (!rqst) {
- pr_err("RPC: %s: Failed to create bc rpc_rqst\n",
- __func__);
+ if (!rqst)
goto out_free;
- }
+
dprintk("RPC: %s: new rqst %p\n", __func__, rqst);
rqst->rq_xprt = &r_xprt->rx_xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 16aff8d..d5b54c0 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2432,7 +2432,12 @@
case -ENETUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
- /* retry with existing socket, after a delay */
+ /*
+ * xs_tcp_force_close() wakes tasks with -EIO.
+ * We need to wake them first to ensure the
+ * correct error code.
+ */
+ xprt_wake_pending_tasks(xprt, status);
xs_tcp_force_close(xprt);
goto out;
}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 312ef7d..ab30876 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -508,7 +508,7 @@
}
if (skb_cloned(_skb) &&
- pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+ pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
goto exit;
/* Now reverse the concerned fields */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6a7fe76..1a0c961 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -999,7 +999,8 @@
struct path path = { };
err = -EINVAL;
- if (sunaddr->sun_family != AF_UNIX)
+ if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+ sunaddr->sun_family != AF_UNIX)
goto out;
if (addr_len == sizeof(short)) {
@@ -1110,6 +1111,10 @@
unsigned int hash;
int err;
+ err = -EINVAL;
+ if (alen < offsetofend(struct sockaddr, sa_family))
+ goto out;
+
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
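
Both unix hunks validate the caller-supplied address length before the family field is read; offsetofend() yields the smallest length for which that field lies fully inside the buffer. A minimal sketch as a standalone check (hypothetical helper name):

    static int unix_addr_len_ok(int alen)
    {
            /* sa_family must lie entirely within the bytes the caller
             * actually provided, otherwise reading it is out of bounds.
             */
            if (alen < (int)offsetofend(struct sockaddr, sa_family))
                    return -EINVAL;
            return 0;
    }
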
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 6fd95f7..a7a23b5 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -20,6 +20,10 @@
If you are unsure as to whether this is required, answer N.
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
config PERSISTENT_KEYRINGS
bool "Enable register of persistent per-UID keyrings"
depends on KEYS
@@ -89,9 +93,9 @@
config KEY_DH_OPERATIONS
bool "Diffie-Hellman operations on retained keys"
depends on KEYS
- select MPILIB
select CRYPTO
select CRYPTO_HASH
+ select CRYPTO_DH
help
This option provides support for calculating Diffie-Hellman
public keys and shared secrets using values stored as keys
diff --git a/security/keys/dh.c b/security/keys/dh.c
index e603bd9..4755d4b 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -8,34 +8,17 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/mpi.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
#include <keys/user-type.h>
#include "internal.h"
-/*
- * Public key or shared secret generation function [RFC2631 sec 2.1.1]
- *
- * ya = g^xa mod p;
- * or
- * ZZ = yb^xa mod p;
- *
- * where xa is the local private key, ya is the local public key, g is
- * the generator, p is the prime, yb is the remote public key, and ZZ
- * is the shared secret.
- *
- * Both are the same calculation, so g or yb are the "base" and ya or
- * ZZ are the "result".
- */
-static int do_dh(MPI result, MPI base, MPI xa, MPI p)
-{
- return mpi_powm(result, base, xa, p);
-}
-
-static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
+static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
{
struct key *key;
key_ref_t key_ref;
@@ -56,19 +39,17 @@
status = key_validate(key);
if (status == 0) {
const struct user_key_payload *payload;
+ uint8_t *duplicate;
payload = user_key_payload_locked(key);
- if (maxlen == 0) {
- *mpi = NULL;
+ duplicate = kmemdup(payload->data, payload->datalen,
+ GFP_KERNEL);
+ if (duplicate) {
+ *data = duplicate;
ret = payload->datalen;
- } else if (payload->datalen <= maxlen) {
- *mpi = mpi_read_raw_data(payload->data,
- payload->datalen);
- if (*mpi)
- ret = payload->datalen;
} else {
- ret = -EINVAL;
+ ret = -ENOMEM;
}
}
up_read(&key->sem);
@@ -79,6 +60,29 @@
return ret;
}
+static void dh_free_data(struct dh *dh)
+{
+ kzfree(dh->key);
+ kzfree(dh->p);
+ kzfree(dh->g);
+}
+
+struct dh_completion {
+ struct completion completion;
+ int err;
+};
+
+static void dh_crypto_done(struct crypto_async_request *req, int err)
+{
+ struct dh_completion *compl = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ compl->err = err;
+ complete(&compl->completion);
+}
+
struct kdf_sdesc {
struct shash_desc shash;
char ctx[];
@@ -89,6 +93,7 @@
struct crypto_shash *tfm;
struct kdf_sdesc *sdesc;
int size;
+ int err;
/* allocate synchronous hash */
tfm = crypto_alloc_shash(hashname, 0, 0);
@@ -97,16 +102,25 @@
return PTR_ERR(tfm);
}
+ err = -EINVAL;
+ if (crypto_shash_digestsize(tfm) == 0)
+ goto out_free_tfm;
+
+ err = -ENOMEM;
size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc)
- return -ENOMEM;
+ goto out_free_tfm;
sdesc->shash.tfm = tfm;
sdesc->shash.flags = 0x0;
*sdesc_ret = sdesc;
return 0;
+
+out_free_tfm:
+ crypto_free_shash(tfm);
+ return err;
}
static void kdf_dealloc(struct kdf_sdesc *sdesc)
@@ -120,14 +134,6 @@
kzfree(sdesc);
}
-/* convert 32 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
-{
- __be32 *a = (__be32 *)buf;
-
- *a = cpu_to_be32(val);
-}
-
/*
* Implementation of the KDF in counter mode according to SP800-108 section 5.1
* as well as SP800-56A section 5.8.1 (Single-step KDF).
@@ -138,25 +144,39 @@
* 5.8.1.2).
*/
static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int dlen)
+ u8 *dst, unsigned int dlen, unsigned int zlen)
{
struct shash_desc *desc = &sdesc->shash;
unsigned int h = crypto_shash_digestsize(desc->tfm);
int err = 0;
u8 *dst_orig = dst;
- u32 i = 1;
- u8 iteration[sizeof(u32)];
+ __be32 counter = cpu_to_be32(1);
while (dlen) {
err = crypto_shash_init(desc);
if (err)
goto err;
- crypto_kw_cpu_to_be32(i, iteration);
- err = crypto_shash_update(desc, iteration, sizeof(u32));
+ err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
if (err)
goto err;
+ if (zlen && h) {
+ u8 tmpbuffer[h];
+ size_t chunk = min_t(size_t, zlen, h);
+ memset(tmpbuffer, 0, chunk);
+
+ do {
+ err = crypto_shash_update(desc, tmpbuffer,
+ chunk);
+ if (err)
+ goto err;
+
+ zlen -= chunk;
+ chunk = min_t(size_t, zlen, h);
+ } while (zlen);
+ }
+
if (src && slen) {
err = crypto_shash_update(desc, src, slen);
if (err)
@@ -179,7 +199,7 @@
dlen -= h;
dst += h;
- i++;
+ counter = cpu_to_be32(be32_to_cpu(counter) + 1);
}
}
@@ -192,7 +212,7 @@
static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
char __user *buffer, size_t buflen,
- uint8_t *kbuf, size_t kbuflen)
+ uint8_t *kbuf, size_t kbuflen, size_t lzero)
{
uint8_t *outbuf = NULL;
int ret;
@@ -203,7 +223,7 @@
goto err;
}
- ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen);
+ ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen, lzero);
if (ret)
goto err;
@@ -221,21 +241,26 @@
struct keyctl_kdf_params *kdfcopy)
{
long ret;
- MPI base, private, prime, result;
- unsigned nbytes;
+ ssize_t dlen;
+ int secretlen;
+ int outlen;
struct keyctl_dh_params pcopy;
- uint8_t *kbuf;
- ssize_t keylen;
- size_t resultlen;
+ struct dh dh_inputs;
+ struct scatterlist outsg;
+ struct dh_completion compl;
+ struct crypto_kpp *tfm;
+ struct kpp_request *req;
+ uint8_t *secret;
+ uint8_t *outbuf;
struct kdf_sdesc *sdesc = NULL;
if (!params || (!buffer && buflen)) {
ret = -EINVAL;
- goto out;
+ goto out1;
}
if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
ret = -EFAULT;
- goto out;
+ goto out1;
}
if (kdfcopy) {
@@ -244,104 +269,147 @@
if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
ret = -EMSGSIZE;
- goto out;
+ goto out1;
}
/* get KDF name string */
hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
if (IS_ERR(hashname)) {
ret = PTR_ERR(hashname);
- goto out;
+ goto out1;
}
/* allocate KDF from the kernel crypto API */
ret = kdf_alloc(&sdesc, hashname);
kfree(hashname);
if (ret)
- goto out;
+ goto out1;
}
- /*
- * If the caller requests postprocessing with a KDF, allow an
- * arbitrary output buffer size since the KDF ensures proper truncation.
- */
- keylen = mpi_from_key(pcopy.prime, kdfcopy ? SIZE_MAX : buflen, &prime);
- if (keylen < 0 || !prime) {
- /* buflen == 0 may be used to query the required buffer size,
- * which is the prime key length.
- */
- ret = keylen;
- goto out;
+ memset(&dh_inputs, 0, sizeof(dh_inputs));
+
+ dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out1;
}
+ dh_inputs.p_size = dlen;
- /* The result is never longer than the prime */
- resultlen = keylen;
-
- keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base);
- if (keylen < 0 || !base) {
- ret = keylen;
- goto error1;
+ dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
}
+ dh_inputs.g_size = dlen;
- keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private);
- if (keylen < 0 || !private) {
- ret = keylen;
- goto error2;
+ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
}
+ dh_inputs.key_size = dlen;
- result = mpi_alloc(0);
- if (!result) {
+ secretlen = crypto_dh_key_len(&dh_inputs);
+ secret = kmalloc(secretlen, GFP_KERNEL);
+ if (!secret) {
ret = -ENOMEM;
- goto error3;
+ goto out2;
}
-
- /* allocate space for DH shared secret and SP800-56A otherinfo */
- kbuf = kmalloc(kdfcopy ? (resultlen + kdfcopy->otherinfolen) : resultlen,
- GFP_KERNEL);
- if (!kbuf) {
- ret = -ENOMEM;
- goto error4;
- }
-
- /*
- * Concatenate SP800-56A otherinfo past DH shared secret -- the
- * input to the KDF is (DH shared secret || otherinfo)
- */
- if (kdfcopy && kdfcopy->otherinfo &&
- copy_from_user(kbuf + resultlen, kdfcopy->otherinfo,
- kdfcopy->otherinfolen) != 0) {
- ret = -EFAULT;
- goto error5;
- }
-
- ret = do_dh(result, base, private, prime);
+ ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
if (ret)
- goto error5;
+ goto out3;
- ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL);
- if (ret != 0)
- goto error5;
+ tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out3;
+ }
+
+ ret = crypto_kpp_set_secret(tfm, secret, secretlen);
+ if (ret)
+ goto out4;
+
+ outlen = crypto_kpp_maxsize(tfm);
+
+ if (!kdfcopy) {
+ /*
+ * When not using a KDF, buflen 0 is used to read the
+ * required buffer length
+ */
+ if (buflen == 0) {
+ ret = outlen;
+ goto out4;
+ } else if (outlen > buflen) {
+ ret = -EOVERFLOW;
+ goto out4;
+ }
+ }
+
+ outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
+ GFP_KERNEL);
+ if (!outbuf) {
+ ret = -ENOMEM;
+ goto out4;
+ }
+
+ sg_init_one(&outsg, outbuf, outlen);
+
+ req = kpp_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out5;
+ }
+
+ kpp_request_set_input(req, NULL, 0);
+ kpp_request_set_output(req, &outsg, outlen);
+ init_completion(&compl.completion);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ dh_crypto_done, &compl);
+
+ /*
+ * For DH, generate_public_key and generate_shared_secret are
+ * the same calculation
+ */
+ ret = crypto_kpp_generate_public_key(req);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&compl.completion);
+ ret = compl.err;
+ if (ret)
+ goto out6;
+ }
if (kdfcopy) {
- ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, kbuf,
- resultlen + kdfcopy->otherinfolen);
- } else {
- ret = nbytes;
- if (copy_to_user(buffer, kbuf, nbytes) != 0)
+ /*
+ * Concatenate SP800-56A otherinfo past DH shared secret -- the
+ * input to the KDF is (DH shared secret || otherinfo)
+ */
+ if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
+ kdfcopy->otherinfolen) != 0) {
ret = -EFAULT;
+ goto out6;
+ }
+
+ ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf,
+ req->dst_len + kdfcopy->otherinfolen,
+ outlen - req->dst_len);
+ } else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
+ ret = req->dst_len;
+ } else {
+ ret = -EFAULT;
}
-error5:
- kzfree(kbuf);
-error4:
- mpi_free(result);
-error3:
- mpi_free(private);
-error2:
- mpi_free(base);
-error1:
- mpi_free(prime);
-out:
+out6:
+ kpp_request_free(req);
+out5:
+ kzfree(outbuf);
+out4:
+ crypto_free_kpp(tfm);
+out3:
+ kzfree(secret);
+out2:
+ dh_free_data(&dh_inputs);
+out1:
kdf_dealloc(sdesc);
return ret;
}
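The rework above drives the DH computation through the crypto KPP API and, when the transform runs asynchronously, parks the caller on a completion. The struct dh_completion and dh_crypto_done() it relies on are added elsewhere in the patch and are not visible in this hunk; the following is a minimal sketch of the standard async-crypto completion pattern they follow, not necessarily the exact upstream definitions.

#include <crypto/kpp.h>
#include <linux/completion.h>

struct dh_completion {
	struct completion completion;
	int err;
};

/* Async callback: record the final status and wake the sleeping caller.
 * A backlogged request may first report -EINPROGRESS and complete for
 * real later, so only the final notification counts.
 */
static void dh_crypto_done(struct crypto_async_request *req, int err)
{
	struct dh_completion *compl = req->data;

	if (err == -EINPROGRESS)
		return;

	compl->err = err;
	complete(&compl->completion);
}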
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 0010955..bb6324d 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -30,6 +30,7 @@
#include <linux/scatterlist.h>
#include <linux/ctype.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
@@ -54,13 +55,7 @@
#define MAX_DATA_SIZE 4096
#define MIN_DATA_SIZE 20
-struct sdesc {
- struct shash_desc shash;
- char ctx[];
-};
-
-static struct crypto_shash *hashalg;
-static struct crypto_shash *hmacalg;
+static struct crypto_shash *hash_tfm;
enum {
Opt_err = -1, Opt_new, Opt_load, Opt_update
@@ -141,23 +136,22 @@
*/
static int valid_master_desc(const char *new_desc, const char *orig_desc)
{
- if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
- if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
- goto out;
- if (orig_desc)
- if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
- goto out;
- } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
- if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
- goto out;
- if (orig_desc)
- if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
- goto out;
- } else
- goto out;
+ int prefix_len;
+
+ if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
+ prefix_len = KEY_TRUSTED_PREFIX_LEN;
+ else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
+ prefix_len = KEY_USER_PREFIX_LEN;
+ else
+ return -EINVAL;
+
+ if (!new_desc[prefix_len])
+ return -EINVAL;
+
+ if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
+ return -EINVAL;
+
return 0;
-out:
- return -EINVAL;
}
/*
@@ -321,53 +315,38 @@
return ukey;
}
-static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
+static int calc_hash(struct crypto_shash *tfm, u8 *digest,
+ const u8 *buf, unsigned int buflen)
{
- struct sdesc *sdesc;
- int size;
+ SHASH_DESC_ON_STACK(desc, tfm);
+ int err;
- size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
- sdesc = kmalloc(size, GFP_KERNEL);
- if (!sdesc)
- return ERR_PTR(-ENOMEM);
- sdesc->shash.tfm = alg;
- sdesc->shash.flags = 0x0;
- return sdesc;
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ err = crypto_shash_digest(desc, buf, buflen, digest);
+ shash_desc_zero(desc);
+ return err;
}
static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
const u8 *buf, unsigned int buflen)
{
- struct sdesc *sdesc;
- int ret;
+ struct crypto_shash *tfm;
+ int err;
- sdesc = alloc_sdesc(hmacalg);
- if (IS_ERR(sdesc)) {
- pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
- return PTR_ERR(sdesc);
+ tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: can't alloc %s transform: %ld\n",
+ hmac_alg, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
}
- ret = crypto_shash_setkey(hmacalg, key, keylen);
- if (!ret)
- ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
- kfree(sdesc);
- return ret;
-}
-
-static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
-{
- struct sdesc *sdesc;
- int ret;
-
- sdesc = alloc_sdesc(hashalg);
- if (IS_ERR(sdesc)) {
- pr_info("encrypted_key: can't alloc %s\n", hash_alg);
- return PTR_ERR(sdesc);
- }
-
- ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
- kfree(sdesc);
- return ret;
+ err = crypto_shash_setkey(tfm, key, keylen);
+ if (!err)
+ err = calc_hash(tfm, digest, buf, buflen);
+ crypto_free_shash(tfm);
+ return err;
}
enum derived_key_type { ENC_KEY, AUTH_KEY };
@@ -385,10 +364,9 @@
derived_buf_len = HASH_SIZE;
derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
- if (!derived_buf) {
- pr_err("encrypted_key: out of memory\n");
+ if (!derived_buf)
return -ENOMEM;
- }
+
if (key_type)
strcpy(derived_buf, "AUTH_KEY");
else
@@ -396,8 +374,8 @@
memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
master_keylen);
- ret = calc_hash(derived_key, derived_buf, derived_buf_len);
- kfree(derived_buf);
+ ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
+ kzfree(derived_buf);
return ret;
}
@@ -480,12 +458,9 @@
struct skcipher_request *req;
unsigned int encrypted_datalen;
u8 iv[AES_BLOCK_SIZE];
- unsigned int padlen;
- char pad[16];
int ret;
encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
- padlen = encrypted_datalen - epayload->decrypted_datalen;
req = init_skcipher_req(derived_key, derived_keylen);
ret = PTR_ERR(req);
@@ -493,11 +468,10 @@
goto out;
dump_decrypted_data(epayload);
- memset(pad, 0, sizeof pad);
sg_init_table(sg_in, 2);
sg_set_buf(&sg_in[0], epayload->decrypted_data,
epayload->decrypted_datalen);
- sg_set_buf(&sg_in[1], pad, padlen);
+ sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
sg_init_table(sg_out, 1);
sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
@@ -533,6 +507,7 @@
if (!ret)
dump_hmac(NULL, digest, HASH_SIZE);
out:
+ memzero_explicit(derived_key, sizeof(derived_key));
return ret;
}
@@ -561,8 +536,8 @@
ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
if (ret < 0)
goto out;
- ret = memcmp(digest, epayload->format + epayload->datablob_len,
- sizeof digest);
+ ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
+ sizeof(digest));
if (ret) {
ret = -EINVAL;
dump_hmac("datablob",
@@ -571,6 +546,7 @@
dump_hmac("calc", digest, HASH_SIZE);
}
out:
+ memzero_explicit(derived_key, sizeof(derived_key));
return ret;
}
@@ -584,9 +560,14 @@
struct skcipher_request *req;
unsigned int encrypted_datalen;
u8 iv[AES_BLOCK_SIZE];
- char pad[16];
+ u8 *pad;
int ret;
+ /* Throwaway buffer to hold the unused zero padding at the end */
+ pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+ if (!pad)
+ return -ENOMEM;
+
encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
req = init_skcipher_req(derived_key, derived_keylen);
ret = PTR_ERR(req);
@@ -594,13 +575,12 @@
goto out;
dump_encrypted_data(epayload, encrypted_datalen);
- memset(pad, 0, sizeof pad);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 2);
sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
sg_set_buf(&sg_out[0], epayload->decrypted_data,
epayload->decrypted_datalen);
- sg_set_buf(&sg_out[1], pad, sizeof pad);
+ sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
memcpy(iv, epayload->iv, sizeof(iv));
skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
@@ -612,6 +592,7 @@
goto out;
dump_decrypted_data(epayload);
out:
+ kfree(pad);
return ret;
}
@@ -722,6 +703,7 @@
out:
up_read(&mkey->sem);
key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
return ret;
}
@@ -828,13 +810,13 @@
ret = encrypted_init(epayload, key->description, format, master_desc,
decrypted_datalen, hex_encoded_iv);
if (ret < 0) {
- kfree(epayload);
+ kzfree(epayload);
goto out;
}
rcu_assign_keypointer(key, epayload);
out:
- kfree(datablob);
+ kzfree(datablob);
return ret;
}
@@ -843,8 +825,7 @@
struct encrypted_key_payload *epayload;
epayload = container_of(rcu, struct encrypted_key_payload, rcu);
- memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
- kfree(epayload);
+ kzfree(epayload);
}
/*
@@ -902,7 +883,7 @@
rcu_assign_keypointer(key, new_epayload);
call_rcu(&epayload->rcu, encrypted_rcu_free);
out:
- kfree(buf);
+ kzfree(buf);
return ret;
}
@@ -960,33 +941,26 @@
up_read(&mkey->sem);
key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
ret = -EFAULT;
- kfree(ascii_buf);
+ kzfree(ascii_buf);
return asciiblob_len;
out:
up_read(&mkey->sem);
key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
return ret;
}
/*
- * encrypted_destroy - before freeing the key, clear the decrypted data
- *
- * Before freeing the key, clear the memory containing the decrypted
- * key data.
+ * encrypted_destroy - clear and free the key's payload
*/
static void encrypted_destroy(struct key *key)
{
- struct encrypted_key_payload *epayload = key->payload.data[0];
-
- if (!epayload)
- return;
-
- memzero_explicit(epayload->decrypted_data, epayload->decrypted_datalen);
- kfree(key->payload.data[0]);
+ kzfree(key->payload.data[0]);
}
struct key_type key_type_encrypted = {
@@ -999,47 +973,17 @@
};
EXPORT_SYMBOL_GPL(key_type_encrypted);
-static void encrypted_shash_release(void)
-{
- if (hashalg)
- crypto_free_shash(hashalg);
- if (hmacalg)
- crypto_free_shash(hmacalg);
-}
-
-static int __init encrypted_shash_alloc(void)
-{
- int ret;
-
- hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(hmacalg)) {
- pr_info("encrypted_key: could not allocate crypto %s\n",
- hmac_alg);
- return PTR_ERR(hmacalg);
- }
-
- hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(hashalg)) {
- pr_info("encrypted_key: could not allocate crypto %s\n",
- hash_alg);
- ret = PTR_ERR(hashalg);
- goto hashalg_fail;
- }
-
- return 0;
-
-hashalg_fail:
- crypto_free_shash(hmacalg);
- return ret;
-}
-
static int __init init_encrypted(void)
{
int ret;
- ret = encrypted_shash_alloc();
- if (ret < 0)
- return ret;
+ hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hash_tfm)) {
+ pr_err("encrypted_key: can't allocate %s transform: %ld\n",
+ hash_alg, PTR_ERR(hash_tfm));
+ return PTR_ERR(hash_tfm);
+ }
+
ret = aes_get_sizes();
if (ret < 0)
goto out;
@@ -1048,14 +992,14 @@
goto out;
return 0;
out:
- encrypted_shash_release();
+ crypto_free_shash(hash_tfm);
return ret;
}
static void __exit cleanup_encrypted(void)
{
- encrypted_shash_release();
+ crypto_free_shash(hash_tfm);
unregister_key_type(&key_type_encrypted);
}
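The encrypted-keys refactor above drops the heap-allocated struct sdesc in favour of SHASH_DESC_ON_STACK() plus a shared calc_hash() helper. As a rough, self-contained sketch of that idiom (the helper name here is illustrative, not taken from the patch):

#include <crypto/hash.h>

/* One-shot digest over a buffer using a shash descriptor on the stack. */
static int digest_buffer(struct crypto_shash *tfm, u8 *out,
			 const u8 *buf, unsigned int buflen)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	desc->flags = 0;		/* synchronous use only */

	err = crypto_shash_digest(desc, buf, buflen, out);
	shash_desc_zero(desc);		/* wipe any hash state left on the stack */
	return err;
}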
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 595becc..87cb260 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -158,9 +158,7 @@
kfree(key->description);
-#ifdef KEY_DEBUGGING
- key->magic = KEY_DEBUG_MAGIC_X;
-#endif
+ memzero_explicit(key, sizeof(*key));
kmem_cache_free(key_jar, key);
}
}
diff --git a/security/keys/key.c b/security/keys/key.c
index 455c04d..83da68d 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -660,14 +660,11 @@
goto error;
found:
- /* pretend it doesn't exist if it is awaiting deletion */
- if (refcount_read(&key->usage) == 0)
- goto not_found;
-
- /* this races with key_put(), but that doesn't matter since key_put()
- * doesn't actually change the key
+ /* A key is allowed to be looked up only if someone still owns a
+ * reference to it - otherwise it's awaiting the gc.
*/
- __key_get(key);
+ if (!refcount_inc_not_zero(&key->usage))
+ goto not_found;
error:
spin_unlock(&key_serial_lock);
@@ -966,12 +963,11 @@
/* the key must be writable */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
- goto error;
+ return ret;
/* attempt to update it if supported */
- ret = -EOPNOTSUPP;
if (!key->type->update)
- goto error;
+ return -EOPNOTSUPP;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
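The key.c hunk replaces the racy refcount_read()-then-__key_get() pair with refcount_inc_not_zero(), which only hands out a reference while the object is still live. A minimal sketch of the lookup pattern (the struct and helper names are hypothetical):

#include <linux/refcount.h>

struct object {
	refcount_t usage;
	/* ... */
};

/* Return the object with a reference held, or NULL if it is already
 * dying: a zero refcount means the garbage collector owns it now, and
 * 0 -> 1 transitions are never allowed.
 */
static struct object *object_get_live(struct object *obj)
{
	if (!refcount_inc_not_zero(&obj->usage))
		return NULL;
	return obj;
}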
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 447a7d5..ab0b337 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -99,7 +99,7 @@
/* pull the payload in if one was supplied */
payload = NULL;
- if (_payload) {
+ if (plen) {
ret = -ENOMEM;
payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
@@ -132,7 +132,10 @@
key_ref_put(keyring_ref);
error3:
- kvfree(payload);
+ if (payload) {
+ memzero_explicit(payload, plen);
+ kvfree(payload);
+ }
error2:
kfree(description);
error:
@@ -324,7 +327,7 @@
/* pull the payload in if one was supplied */
payload = NULL;
- if (_payload) {
+ if (plen) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload)
@@ -347,7 +350,7 @@
key_ref_put(key_ref);
error2:
- kfree(payload);
+ kzfree(payload);
error:
return ret;
}
@@ -1093,7 +1096,10 @@
keyctl_change_reqkey_auth(NULL);
error2:
- kvfree(payload);
+ if (payload) {
+ memzero_explicit(payload, plen);
+ kvfree(payload);
+ }
error:
return ret;
}
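The keyctl.c changes wipe user-supplied payloads before freeing them. kzfree() only covers plain kmalloc() memory, and the payload here may come from kvmalloc(), hence the explicit memzero_explicit() + kvfree() pair. A small sketch of that helper shape (the function name is illustrative):

#include <linux/mm.h>
#include <linux/string.h>

/* Zero a possibly vmalloc'ed secret before returning it to the allocator.
 * memzero_explicit() cannot be optimised away by the compiler the way a
 * trailing memset() before free can.
 */
static void free_secret(void *payload, size_t plen)
{
	if (!payload)
		return;
	memzero_explicit(payload, plen);
	kvfree(payload);
}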
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4d1678e4..de81793 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -706,7 +706,7 @@
* Non-keyrings avoid the leftmost branch of the root entirely (root
* slots 1-15).
*/
- ptr = ACCESS_ONCE(keyring->keys.root);
+ ptr = READ_ONCE(keyring->keys.root);
if (!ptr)
goto not_this_keyring;
@@ -720,7 +720,7 @@
if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
goto not_this_keyring;
- ptr = ACCESS_ONCE(shortcut->next_node);
+ ptr = READ_ONCE(shortcut->next_node);
node = assoc_array_ptr_to_node(ptr);
goto begin_node;
}
@@ -740,7 +740,7 @@
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
smp_read_barrier_depends();
- ptr = ACCESS_ONCE(shortcut->next_node);
+ ptr = READ_ONCE(shortcut->next_node);
BUG_ON(!assoc_array_ptr_is_node(ptr));
}
node = assoc_array_ptr_to_node(ptr);
@@ -752,7 +752,7 @@
ascend_to_node:
/* Go through the slots in a node */
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
goto descend_to_node;
@@ -790,13 +790,13 @@
/* We've dealt with all the slots in the current node, so now we need
* to ascend to the parent and continue processing there.
*/
- ptr = ACCESS_ONCE(node->back_pointer);
+ ptr = READ_ONCE(node->back_pointer);
slot = node->parent_slot;
if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
smp_read_barrier_depends();
- ptr = ACCESS_ONCE(shortcut->back_pointer);
+ ptr = READ_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
}
if (!ptr)
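The keyring.c hunks are a mechanical ACCESS_ONCE() -> READ_ONCE() conversion for pointers that are updated concurrently and walked under RCU. A tiny illustration of the idiom (the helper is hypothetical; the struct mirrors the assoc_array code):

#include <linux/compiler.h>
#include <linux/assoc_array.h>

/* Force a single, untorn load of the RCU-published root pointer; the
 * compiler may neither refetch nor split the access.
 */
static bool keyring_tree_is_empty(struct assoc_array *keys)
{
	return READ_ONCE(keys->root) == NULL;
}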
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 2217dfe..86bced9 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -809,15 +809,14 @@
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
- key_put(keyring);
ret = 0;
- goto error2;
+ goto error3;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
- goto error2;
+ goto error3;
commit_creds(new);
mutex_unlock(&key_session_mutex);
@@ -827,6 +826,8 @@
okay:
return ret;
+error3:
+ key_put(keyring);
error2:
mutex_unlock(&key_session_mutex);
error:
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 2ae31c5..435e86e 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -70,7 +70,7 @@
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -114,7 +114,7 @@
if (!ret)
ret = crypto_shash_final(&sdesc->shash, digest);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -165,7 +165,7 @@
paramdigest, TPM_NONCE_SIZE, h1,
TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -246,7 +246,7 @@
if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -347,7 +347,7 @@
if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -564,7 +564,7 @@
*bloblen = storedsize;
}
out:
- kfree(td);
+ kzfree(td);
return ret;
}
@@ -678,7 +678,7 @@
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -703,7 +703,7 @@
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -1037,12 +1037,12 @@
if (!ret && options->pcrlock)
ret = pcrlock(options->pcrlock);
out:
- kfree(datablob);
- kfree(options);
+ kzfree(datablob);
+ kzfree(options);
if (!ret)
rcu_assign_keypointer(key, payload);
else
- kfree(payload);
+ kzfree(payload);
return ret;
}
@@ -1051,8 +1051,7 @@
struct trusted_key_payload *p;
p = container_of(rcu, struct trusted_key_payload, rcu);
- memset(p->key, 0, p->key_len);
- kfree(p);
+ kzfree(p);
}
/*
@@ -1094,13 +1093,13 @@
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (!new_o->keyhandle) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
@@ -1114,22 +1113,22 @@
ret = key_seal(new_p, new_o);
if (ret < 0) {
pr_info("trusted_key: key_seal failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (new_o->pcrlock) {
ret = pcrlock(new_o->pcrlock);
if (ret < 0) {
pr_info("trusted_key: pcrlock failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
}
rcu_assign_keypointer(key, new_p);
call_rcu(&p->rcu, trusted_rcu_free);
out:
- kfree(datablob);
- kfree(new_o);
+ kzfree(datablob);
+ kzfree(new_o);
return ret;
}
@@ -1158,24 +1157,19 @@
for (i = 0; i < p->blob_len; i++)
bufp = hex_byte_pack(bufp, p->blob[i]);
if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
- kfree(ascii_buf);
+ kzfree(ascii_buf);
return -EFAULT;
}
- kfree(ascii_buf);
+ kzfree(ascii_buf);
return 2 * p->blob_len;
}
/*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
*/
static void trusted_destroy(struct key *key)
{
- struct trusted_key_payload *p = key->payload.data[0];
-
- if (!p)
- return;
- memset(p->key, 0, p->key_len);
- kfree(key->payload.data[0]);
+ kzfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
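The trusted.c changes convert every kfree() of buffers that may hold TPM session or key material to kzfree(), which zeroes the whole allocation (ksize() bytes) before handing it back to the slab. A minimal illustration (the struct and helper are hypothetical):

#include <linux/slab.h>

struct tpm_blob {
	u8 key[128];
	unsigned int key_len;
};

/* kzfree() == clear, then kfree(): nothing sensitive survives in freed
 * slab memory. It only works for kmalloc()'ed objects, which is why the
 * kvmalloc() paths elsewhere use memzero_explicit() + kvfree() instead.
 */
static void tpm_blob_free(struct tpm_blob *blob)
{
	kzfree(blob);		/* kzfree(NULL) is a no-op, like kfree(NULL) */
}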
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 2660513..3d8c68e 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -86,10 +86,18 @@
*/
void user_free_preparse(struct key_preparsed_payload *prep)
{
- kfree(prep->payload.data[0]);
+ kzfree(prep->payload.data[0]);
}
EXPORT_SYMBOL_GPL(user_free_preparse);
+static void user_free_payload_rcu(struct rcu_head *head)
+{
+ struct user_key_payload *payload;
+
+ payload = container_of(head, struct user_key_payload, rcu);
+ kzfree(payload);
+}
+
/*
* update a user defined key
* - the key's semaphore is write-locked
@@ -112,7 +120,7 @@
prep->payload.data[0] = NULL;
if (zap)
- kfree_rcu(zap, rcu);
+ call_rcu(&zap->rcu, user_free_payload_rcu);
return ret;
}
EXPORT_SYMBOL_GPL(user_update);
@@ -130,7 +138,7 @@
if (upayload) {
rcu_assign_keypointer(key, NULL);
- kfree_rcu(upayload, rcu);
+ call_rcu(&upayload->rcu, user_free_payload_rcu);
}
}
@@ -143,7 +151,7 @@
{
struct user_key_payload *upayload = key->payload.data[0];
- kfree(upayload);
+ kzfree(upayload);
}
EXPORT_SYMBOL_GPL(user_destroy);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e67a526..819fd68 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1106,10 +1106,8 @@
opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int),
GFP_KERNEL);
- if (!opts->mnt_opts_flags) {
- kfree(opts->mnt_opts);
+ if (!opts->mnt_opts_flags)
goto out_err;
- }
if (fscontext) {
opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1132,6 +1130,7 @@
return 0;
out_err:
+ security_free_mnt_opts(opts);
kfree(context);
kfree(defcontext);
kfree(fscontext);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 2f836ca..cd67d1c 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1618,6 +1618,7 @@
if (err < 0)
goto __err;
+ tu->qhead = tu->qtail = tu->qused = 0;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
@@ -1974,7 +1976,9 @@
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
schedule();
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@
tu->qused--;
spin_unlock_irq(&tu->qlock);
- mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
- mutex_unlock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
if (err < 0)
@@ -2014,6 +2016,7 @@
}
_error:
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
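The timer read fix above holds tu->ioctl_lock across the whole read loop, so the queue cannot be freed or reallocated by a concurrent ioctl, and releases it (together with the spinlock) only while sleeping for new data. A simplified sketch of that "drop the locks around the sleep" shape, using a single mutex and wait_event_interruptible() instead of the open-coded wait queue (struct and field names are hypothetical):

#include <linux/mutex.h>
#include <linux/wait.h>

struct evqueue {
	struct mutex lock;		/* serialises readers against ioctl */
	wait_queue_head_t sleep;	/* init with init_waitqueue_head() */
	unsigned int qused;
	/* ... ring buffer ... */
};

/* Hold the mutex across the whole read, but never while sleeping. */
static int wait_for_event(struct evqueue *q)
{
	int err = 0;

	mutex_lock(&q->lock);
	while (!q->qused) {
		mutex_unlock(&q->lock);
		err = wait_event_interruptible(q->sleep, q->qused);
		mutex_lock(&q->lock);
		if (err)
			break;
	}
	/* if err == 0, consume one queued entry here, still under q->lock */
	mutex_unlock(&q->lock);
	return err;
}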
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a57988d..cbeebc0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5854,7 +5854,11 @@
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
- SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 7ae46c2..b7ef8c5 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -301,6 +301,14 @@
return 0;
}
+static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
+{
+ struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
+ struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
+
+ return regcache_sync(dd->regmap);
+}
+
static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
{
return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@
static struct snd_soc_codec_driver soc_codec_dev_classd = {
.probe = atmel_classd_codec_probe,
+ .resume = atmel_classd_codec_resume,
.get_regmap = atmel_classd_codec_get_remap,
.component_driver = {
.controls = atmel_classd_snd_controls,
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 6dd7578..024d83f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -772,7 +772,7 @@
++i;
msleep(50);
}
- } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock));
+ } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
if (!srm_lock)
dev_warn(codec->dev, "SRM failed to lock\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7..7899a2c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
}
},
+ {
+ .ident = "Thinkpad Helix 2nd",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+ }
+ },
{ }
};
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2c9deda..bc136d2 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -202,7 +202,7 @@
if (ret < 0)
return ret;
- ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+ ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
if (ret < 0)
return ret;
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 58c5250..498b153 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -413,8 +413,11 @@
u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
u64 *ipc_header = (u64 *)(&header);
struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+ unsigned long flags;
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
if (msg == NULL) {
dev_dbg(ipc->dev, "ipc: rx list is empty\n");
return;
@@ -456,8 +459,10 @@
}
}
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
list_del(&msg->list);
sst_ipc_tx_msg_reply_complete(ipc, msg);
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
}
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
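The IPC fix takes ipc->dsp->spinlock around both the rx-list lookup and the later list_del(), because the same list is manipulated from interrupt context. A generic sketch of the consumer side of that pattern (struct names are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct msg_queue {
	spinlock_t lock;		/* init with spin_lock_init() */
	struct list_head msgs;		/* init with INIT_LIST_HEAD() */
};

struct queued_msg {
	struct list_head list;
	/* ... payload ... */
};

/* Safe against a producer running in irq context: inspect and unlink the
 * entry with the lock held and interrupts disabled.
 */
static struct queued_msg *msg_queue_pop(struct msg_queue *q)
{
	struct queued_msg *msg;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	msg = list_first_entry_or_null(&q->msgs, struct queued_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irqrestore(&q->lock, flags);

	return msg;
}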
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 3a99712..64a0f8e 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2502,7 +2502,7 @@
if (ret < 0)
return ret;
- tkn_count += ret;
+ tkn_count = ret;
tuple_size += tkn_count *
sizeof(struct snd_soc_tplg_vendor_string_elem);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 6df3b31..4c9b578 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -410,7 +410,7 @@
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
- skl->init_failed = 1; /* to be sure */
+ skl->init_done = 0; /* to be sure */
snd_hdac_ext_stop_streams(ebus);
@@ -428,8 +428,10 @@
snd_hdac_ext_bus_exit(ebus);
+ cancel_work_sync(&skl->probe_work);
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
snd_hdac_i915_exit(&ebus->bus);
+
return 0;
}
@@ -566,6 +568,84 @@
.get_response = snd_hdac_bus_get_response,
};
+static int skl_i915_init(struct hdac_bus *bus)
+{
+ int err;
+
+ /*
+ * The HDMI codec is in GPU so we need to ensure that it is powered
+ * up and ready for probe
+ */
+ err = snd_hdac_i915_init(bus);
+ if (err < 0)
+ return err;
+
+ err = snd_hdac_display_power(bus, true);
+ if (err < 0)
+ dev_err(bus->dev, "Cannot turn on display power on i915\n");
+
+ return err;
+}
+
+static void skl_probe_work(struct work_struct *work)
+{
+ struct skl *skl = container_of(work, struct skl, probe_work);
+ struct hdac_ext_bus *ebus = &skl->ebus;
+ struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_ext_link *hlink = NULL;
+ int err;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+ err = skl_i915_init(bus);
+ if (err < 0)
+ return;
+ }
+
+ err = skl_init_chip(bus, true);
+ if (err < 0) {
+ dev_err(bus->dev, "Init chip failed with err: %d\n", err);
+ goto out_err;
+ }
+
+ /* codec detection */
+ if (!bus->codec_mask)
+ dev_info(bus->dev, "no hda codecs found!\n");
+
+ /* create codec instances */
+ err = skl_codec_create(ebus);
+ if (err < 0)
+ goto out_err;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+ err = snd_hdac_display_power(bus, false);
+ if (err < 0) {
+ dev_err(bus->dev, "Cannot turn off display power on i915\n");
+ return;
+ }
+ }
+
+ /* register platform dai and controls */
+ err = skl_platform_register(bus->dev);
+ if (err < 0)
+ return;
+ /*
+ * we are done probing so decrement link counts
+ */
+ list_for_each_entry(hlink, &ebus->hlink_list, list)
+ snd_hdac_ext_bus_link_put(ebus, hlink);
+
+ /* configure PM */
+ pm_runtime_put_noidle(bus->dev);
+ pm_runtime_allow(bus->dev);
+ skl->init_done = 1;
+
+ return;
+
+out_err:
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ err = snd_hdac_display_power(bus, false);
+}
+
/*
* constructor
*/
@@ -593,6 +673,7 @@
snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
ebus->bus.use_posbuf = 1;
skl->pci = pci;
+ INIT_WORK(&skl->probe_work, skl_probe_work);
ebus->bus.bdl_pos_adj = 0;
@@ -601,27 +682,6 @@
return 0;
}
-static int skl_i915_init(struct hdac_bus *bus)
-{
- int err;
-
- /*
- * The HDMI codec is in GPU so we need to ensure that it is powered
- * up and ready for probe
- */
- err = snd_hdac_i915_init(bus);
- if (err < 0)
- return err;
-
- err = snd_hdac_display_power(bus, true);
- if (err < 0) {
- dev_err(bus->dev, "Cannot turn on display power on i915\n");
- return err;
- }
-
- return err;
-}
-
static int skl_first_init(struct hdac_ext_bus *ebus)
{
struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@
/* initialize chip */
skl_init_pci(skl);
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
- err = skl_i915_init(bus);
- if (err < 0)
- return err;
- }
-
- skl_init_chip(bus, true);
-
- /* codec detection */
- if (!bus->codec_mask) {
- dev_info(bus->dev, "no hda codecs found!\n");
- }
-
- return 0;
+ return skl_init_chip(bus, true);
}
static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@
struct skl *skl;
struct hdac_ext_bus *ebus = NULL;
struct hdac_bus *bus = NULL;
- struct hdac_ext_link *hlink = NULL;
int err;
/* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@
if (skl->nhlt == NULL) {
err = -ENODEV;
- goto out_display_power_off;
+ goto out_free;
}
err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(ebus);
+ snd_hdac_bus_stop_chip(bus);
+
/* create device for soc dmic */
err = skl_dmic_device_register(skl);
if (err < 0)
goto out_dsp_free;
- /* register platform dai and controls */
- err = skl_platform_register(bus->dev);
- if (err < 0)
- goto out_dmic_free;
-
- /* create codec instances */
- err = skl_codec_create(ebus);
- if (err < 0)
- goto out_unregister;
-
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(bus->dev, "Cannot turn off display power on i915\n");
- return err;
- }
- }
-
- /*
- * we are done probling so decrement link counts
- */
- list_for_each_entry(hlink, &ebus->hlink_list, list)
- snd_hdac_ext_bus_link_put(ebus, hlink);
-
- /* configure PM */
- pm_runtime_put_noidle(bus->dev);
- pm_runtime_allow(bus->dev);
+ schedule_work(&skl->probe_work);
return 0;
-out_unregister:
- skl_platform_unregister(bus->dev);
-out_dmic_free:
- skl_dmic_device_unregister(skl);
out_dsp_free:
skl_free_dsp(skl);
out_mach_free:
skl_machine_device_unregister(skl);
out_nhlt_free:
skl_nhlt_free(skl->nhlt);
-out_display_power_off:
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_display_power(bus, false);
out_free:
- skl->init_failed = 1;
skl_free(ebus);
return err;
@@ -828,7 +842,7 @@
skl = ebus_to_skl(ebus);
- if (skl->init_failed)
+ if (!skl->init_done)
return;
snd_hdac_ext_stop_streams(ebus);
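The skl.c rework defers the slow part of probe (i915 power-up, codec enumeration, platform registration) to a workqueue item and only sets init_done when that work finishes; remove() cancels the work before tearing anything down. A bare-bones sketch of that deferred-probe shape (all names are hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_card {
	struct work_struct probe_work;
	bool init_done;
	/* ... */
};

static void my_card_probe_work(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card, probe_work);

	/* slow, sleepable initialisation goes here */
	card->init_done = true;
}

static int my_card_probe(struct my_card *card)
{
	INIT_WORK(&card->probe_work, my_card_probe_work);
	schedule_work(&card->probe_work);	/* return from probe() quickly */
	return 0;
}

static void my_card_remove(struct my_card *card)
{
	cancel_work_sync(&card->probe_work);	/* never race the worker */
	if (!card->init_done)
		return;
	/* tear down only what the worker actually set up */
}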
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index a454f60..2a630fc 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -46,7 +46,7 @@
struct hdac_ext_bus ebus;
struct pci_dev *pci;
- unsigned int init_failed:1; /* delayed init failed */
+ unsigned int init_done:1; /* delayed init status */
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@
const struct firmware *tplg;
int supend_active;
+
+ struct work_struct probe_work;
};
#define skl_to_ebus(s) (&(s)->ebus)
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 66203d1..d3b0dc1 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -507,7 +507,8 @@
rbga = rbgx;
adg->rbga_rate_for_441khz = rate / div;
ckr |= brg_table[i] << 20;
- if (req_441kHz_rate)
+ if (req_441kHz_rate &&
+ !(adg_mode_flags(adg) & AUDIO_OUT_48))
parent_clk_name = __clk_get_name(clk);
}
}
@@ -522,7 +523,8 @@
rbgb = rbgx;
adg->rbgb_rate_for_48khz = rate / div;
ckr |= brg_table[i] << 16;
- if (req_48kHz_rate)
+ if (req_48kHz_rate &&
+ (adg_mode_flags(adg) & AUDIO_OUT_48))
parent_clk_name = __clk_get_name(clk);
}
}
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 7d92a24..d879c01 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -89,6 +89,7 @@
dev_dbg(dev, "ctu/mix path = 0x%08x", data);
rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
+ rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
rsnd_adg_set_cmd_timsel_gen2(mod, io);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1744015..8c1f4e2 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -343,6 +343,57 @@
return 0x76543210;
}
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
+{
+ enum rsnd_mod_type playback_mods[] = {
+ RSND_MOD_SRC,
+ RSND_MOD_CMD,
+ RSND_MOD_SSIU,
+ };
+ enum rsnd_mod_type capture_mods[] = {
+ RSND_MOD_CMD,
+ RSND_MOD_SRC,
+ RSND_MOD_SSIU,
+ };
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_mod *tmod = NULL;
+ enum rsnd_mod_type *mods =
+ rsnd_io_is_play(io) ?
+ playback_mods : capture_mods;
+ int i;
+
+ /*
+ * This is needed for 24bit data
+ * We need to shift 8bit
+ *
+ * Linux 24bit data is located as 0x00******
+ * HW 24bit data is located as 0x******00
+ *
+ */
+ switch (runtime->sample_bits) {
+ case 16:
+ return 0;
+ case 32:
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
+ tmod = rsnd_io_to_mod(io, mods[i]);
+ if (tmod)
+ break;
+ }
+
+ if (tmod != mod)
+ return 0;
+
+ if (rsnd_io_is_play(io))
+ return (0 << 20) | /* shift to Left */
+ (8 << 16); /* 8bit */
+ else
+ return (1 << 20) | /* shift to Right */
+ (8 << 16); /* 8bit */
+}
+
/*
* rsnd_dai functions
*/
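The new rsnd_get_busif_shift() compensates for ALSA keeping 24-bit samples right-justified in a 32-bit word (0x00XXXXXX) while the hardware expects them left-justified (0xXXXXXX00). A tiny user-space style illustration of the 8-bit shift involved, detached from the register encoding in the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t alsa_s24 = 0x00123456;		/* S24 sample in a 32-bit slot */
	uint32_t hw_word  = alsa_s24 << 8;	/* layout the SSI expects */

	printf("%08x -> %08x\n", alsa_s24, hw_word);
	return 0;
}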
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 63b6d3c..4b09807 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -236,6 +236,7 @@
RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20),
RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20),
RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20),
+ RSND_GEN_M_REG(CMD_BUSIF_MODE, 0x184, 0x20),
RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20),
RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20),
RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index dbf4163..323af41 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -73,6 +73,7 @@
RSND_REG_SCU_SYS_INT_EN0,
RSND_REG_SCU_SYS_INT_EN1,
RSND_REG_CMD_CTRL,
+ RSND_REG_CMD_BUSIF_MODE,
RSND_REG_CMD_BUSIF_DALIGN,
RSND_REG_CMD_ROUTE_SLCT,
RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@
u32 mask, u32 data);
u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
/*
* R-Car DMA
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 20b5b2e..76a477a 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -190,11 +190,13 @@
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ int is_play = rsnd_io_is_play(io);
int use_src = 0;
u32 fin, fout;
u32 ifscr, fsrate, adinr;
u32 cr, route;
u32 bsdsr, bsisr;
+ u32 i_busif, o_busif, tmp;
uint ratio;
if (!runtime)
@@ -270,6 +272,11 @@
break;
}
+ /* BUSIF_MODE */
+ tmp = rsnd_get_busif_shift(io, mod);
+ i_busif = ( is_play ? tmp : 0) | 1;
+ o_busif = (!is_play ? tmp : 0) | 1;
+
rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */
@@ -281,8 +288,9 @@
rsnd_mod_write(mod, SRC_BSISR, bsisr);
rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */
- rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1);
- rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1);
+ rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
+ rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
+
rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 135c566..91e5c07 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -302,7 +302,7 @@
* always use 32bit system word.
* see also rsnd_ssi_master_clk_enable()
*/
- cr_own = FORCE | SWL_32 | PDTA;
+ cr_own = FORCE | SWL_32;
if (rdai->bit_clk_inv)
cr_own |= SCKP;
@@ -550,6 +550,13 @@
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 *buf = (u32 *)(runtime->dma_area +
rsnd_dai_pointer_offset(io, 0));
+ int shift = 0;
+
+ switch (runtime->sample_bits) {
+ case 32:
+ shift = 8;
+ break;
+ }
/*
* 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@
* see rsnd_ssi_init()
*/
if (rsnd_io_is_play(io))
- rsnd_mod_write(mod, SSITDR, *buf);
+ rsnd_mod_write(mod, SSITDR, (*buf) << shift);
else
- *buf = rsnd_mod_read(mod, SSIRDR);
+ *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
}
@@ -709,6 +716,11 @@
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+
+ /* Do nothing for SSI parent mod */
+ if (ssi_parent_mod == mod)
+ return 0;
/* PIO will request IRQ again */
free_irq(ssi->irq, mod);
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 14fafda..512d238 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -144,7 +144,8 @@
(rsnd_io_is_play(io) ?
rsnd_runtime_channel_after_ctu(io) :
rsnd_runtime_channel_original(io)));
- rsnd_mod_write(mod, SSI_BUSIF_MODE, 1);
+ rsnd_mod_write(mod, SSI_BUSIF_MODE,
+ rsnd_get_busif_shift(io, mod) | 1);
rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
rsnd_get_dalign(mod, io));
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index aae099c..754e3ef 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2286,6 +2286,9 @@
list_for_each_entry(rtd, &card->rtd_list, list)
flush_delayed_work(&rtd->delayed_work);
+ /* free the ALSA card at first; this syncs with pending operations */
+ snd_card_free(card->snd_card);
+
/* remove and free each DAI */
soc_remove_dai_links(card);
soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@
if (card->remove)
card->remove(card);
- snd_card_free(card->snd_card);
return 0;
-
}
/* removes a socdev */
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 282a603..5f66697f 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -192,7 +192,8 @@
"complete_and_exit",
"kvm_spurious_fault",
"__reiserfs_panic",
- "lbug_with_loc"
+ "lbug_with_loc",
+ "fortify_panic",
};
if (func->bind == STB_WEAK)
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index e6c9902..165c2b1 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -240,9 +240,13 @@
or
./perf probe --add='schedule:12 cpu'
- this will add one or more probes which has the name start with "schedule".
+Add one or more probes whose names start with "schedule".
- Add probes on lines in schedule() function which calls update_rq_clock().
+ ./perf probe schedule*
+ or
+ ./perf probe --add='schedule*'
+
+Add probes on the lines in the schedule() function which call update_rq_clock().
./perf probe 'schedule;update_rq_clock*'
or
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index dfbb506..142606c 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -39,7 +39,7 @@
When perf script is invoked using a trace script, a user-defined
'handler function' is called for each event in the trace. If there's
no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
next event is processed.
Most of the event's field values are passed as arguments to the
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 54acba2..51ec2d2 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -149,10 +149,8 @@
print "id=%d, args=%s\n" % \
(id, args),
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+def trace_unhandled(event_name, context, event_fields_dict):
+ print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@
process can be generalized to any tracepoint or set of tracepoints
you're interested in - basically find the tracepoint(s) you're
interested in by looking at the list of available events shown by
-'perf list' and/or look in /sys/kernel/debug/tracing events for
+'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
detailed event and field info, record the corresponding trace data
using 'perf record', passing it the list of interesting events,
generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@
scripts listed by the 'perf script -l' command e.g.:
----
-root@tropicana:~# perf script -l
+# perf script -l
List of available trace scripts:
wakeup-latency system-wide min/max/avg wakeup latency
rw-by-file <comm> r/w activity for a program, by file
@@ -383,8 +381,6 @@
----
# ls -al kernel-source/tools/perf/scripts/python
-
-root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
total 32
drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@
should show a new entry for your script:
----
-root@tropicana:~# perf script -l
+# perf script -l
List of available trace scripts:
wakeup-latency system-wide min/max/avg wakeup latency
rw-by-file <comm> r/w activity for a program, by file
@@ -437,7 +433,7 @@
When perf script is invoked using a trace script, a user-defined
'handler function' is called for each event in the trace. If there's
no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
next event is processed.
Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@
gives scripts a chance to do setup tasks:
----
-def trace_begin:
+def trace_begin():
pass
----
@@ -541,7 +537,7 @@
as display results:
----
-def trace_end:
+def trace_end():
pass
----
@@ -550,8 +546,7 @@
of common arguments are passed into it:
----
-def trace_unhandled(event_name, context, common_cpu, common_secs,
- common_nsecs, common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
pass
----
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 8354d04..1f4fbc9 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -19,18 +19,18 @@
include $(srctree)/tools/scripts/Makefile.arch
-$(call detected_var,ARCH)
+$(call detected_var,SRCARCH)
NO_PERF_REGS := 1
# Additional ARCH settings for ppc
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
NO_PERF_REGS := 0
LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
endif
# Additional ARCH settings for x86
-ifeq ($(ARCH),x86)
+ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@
NO_PERF_REGS := 0
endif
-ifeq ($(ARCH),arm)
+ifeq ($(SRCARCH),arm)
NO_PERF_REGS := 0
LIBUNWIND_LIBS = -lunwind -lunwind-arm
endif
-ifeq ($(ARCH),arm64)
+ifeq ($(SRCARCH),arm64)
NO_PERF_REGS := 0
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
@@ -61,7 +61,7 @@
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
# to the check.
-ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
NO_LIBDW_DWARF_UNWIND := 1
endif
@@ -115,9 +115,9 @@
FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
-FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
# include ARCH specific config
--include $(src-perf)/arch/$(ARCH)/Makefile
+-include $(src-perf)/arch/$(SRCARCH)/Makefile
ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -228,12 +228,12 @@
endif
INC_FLAGS += -I$(src-perf)/util/include
-INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include
+INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
INC_FLAGS += -I$(srctree)/tools/include/uapi
INC_FLAGS += -I$(srctree)/tools/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
# $(obj-perf) for generated common-cmds.h
# $(obj-perf)/util for generated bison/flex headers
@@ -355,7 +355,7 @@
ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
- msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+ msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
NO_DWARF := 1
else
CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -380,7 +380,7 @@
CFLAGS += -DHAVE_BPF_PROLOGUE
$(call detected,CONFIG_BPF_PROLOGUE)
else
- msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
+ msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
endif
else
msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -406,7 +406,7 @@
endif
endif
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
ifndef NO_DWARF
CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
endif
@@ -487,7 +487,7 @@
endif
ifndef NO_LOCAL_LIBUNWIND
- ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
$(call feature_check,libunwind-debug-frame)
ifneq ($(feature-libunwind-debug-frame), 1)
msg := $(warning No debug_frame support found in libunwind);
@@ -740,7 +740,7 @@
NO_PERF_READ_VDSO32 := 1
endif
endif
- ifneq ($(ARCH), x86)
+ ifneq ($(SRCARCH), x86)
NO_PERF_READ_VDSOX32 := 1
endif
ifndef NO_PERF_READ_VDSOX32
@@ -769,7 +769,7 @@
endif
ifndef NO_AUXTRACE
- ifeq ($(ARCH),x86)
+ ifeq ($(SRCARCH),x86)
ifeq ($(feature-get_cpuid), 0)
msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
NO_AUXTRACE := 1
@@ -872,7 +872,7 @@
ETC_PERFCONFIG = etc/perfconfig
endif
ifndef lib
-ifeq ($(ARCH)$(IS_64_BIT), x861)
+ifeq ($(SRCARCH)$(IS_64_BIT), x861)
lib = lib64
else
lib = lib
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 79fe31f..5008f51 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -226,7 +226,7 @@
ifeq ($(config),0)
include $(srctree)/tools/scripts/Makefile.arch
--include arch/$(ARCH)/Makefile
+-include arch/$(SRCARCH)/Makefile
endif
# The FEATURE_DUMP_EXPORT holds location of the actual
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index 109eb75..d9b6af8 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
libperf-y += common.o
-libperf-y += $(ARCH)/
+libperf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 837067f..6b40e9f 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -26,6 +26,7 @@
const char *const powerpc_triplets[] = {
"powerpc-unknown-linux-gnu-",
+ "powerpc-linux-gnu-",
"powerpc64-unknown-linux-gnu-",
"powerpc64-linux-gnu-",
"powerpc64le-linux-gnu-",
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a935b50..ad9324d1 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1578,6 +1578,7 @@
static void print_footer(void)
{
FILE *output = stat_config.output;
+ int n;
if (!null_run)
fprintf(output, "\n");
@@ -1590,7 +1591,9 @@
}
fprintf(output, "\n\n");
- if (print_free_counters_hint)
+ if (print_free_counters_hint &&
+ sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+ n > 0)
fprintf(output,
"Some events weren't counted. Try disabling the NMI watchdog:\n"
" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d014350..4b2a5d2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -681,6 +681,10 @@
{ .name = "mlockall", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mmap", .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+ .alias = "old_mmap",
+#endif
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[2] = SCA_MMAP_PROT, /* prot */
[3] = SCA_MMAP_FLAGS, /* flags */ }, },
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 9213a12..999a4e8 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -2,7 +2,7 @@
jevents-y += json.o jsmn.o jevents.o
pmu-events-y += pmu-events.o
-JDIR = pmu-events/arch/$(ARCH)
+JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
#
@@ -10,4 +10,4 @@
# directory and create tables in pmu-events.c.
#
$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
- $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+ $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index af58ebc..84222bd 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -75,7 +75,7 @@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
-ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc))
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index e7664fe..8ba2c46 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -288,3 +288,17 @@
return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
TEST_OK : TEST_FAIL;
}
+
+bool test__bp_signal_is_supported(void)
+{
+/*
+ * The powerpc so far does not have support to even create
+ * instruction breakpoint using the perf event interface.
+ * Once it's there we can release this.
+ */
+#ifdef __powerpc__
+ return false;
+#else
+ return true;
+#endif
+}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 9e08d29..3ccfd58 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -97,10 +97,12 @@
{
.desc = "Breakpoint overflow signal handler",
.func = test__bp_signal,
+ .is_supported = test__bp_signal_is_supported,
},
{
.desc = "Breakpoint overflow sampling",
.func = test__bp_signal_overflow,
+ .is_supported = test__bp_signal_is_supported,
},
{
.desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@
if (!perf_test__matches(t, curr, argc, argv))
continue;
+ if (t->is_supported && !t->is_supported()) {
+ pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
+ continue;
+ }
+
pr_info("%2d: %-*s:", i, width, t->desc);
if (intlist__find(skiplist, i)) {
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 1f14e76..94b7c7b 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,6 +229,8 @@
unsigned char buf2[BUFSZ];
size_t ret_len;
u64 objdump_addr;
+ const char *objdump_name;
+ char decomp_name[KMOD_DECOMP_LEN];
int ret;
pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@
state->done[state->done_cnt++] = al.map->start;
}
+ objdump_name = al.map->dso->long_name;
+ if (dso__needs_decompress(al.map->dso)) {
+ if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+ decomp_name,
+ sizeof(decomp_name)) < 0) {
+ pr_debug("decompression failed\n");
+ return -1;
+ }
+
+ objdump_name = decomp_name;
+ }
+
/* Read the object code using objdump */
objdump_addr = map__rip_2objdump(al.map, al.addr);
- ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+ ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+ if (dso__needs_decompress(al.map->dso))
+ unlink(objdump_name);
+
if (ret > 0) {
/*
* The kernel maps are inaccurate - assume objdump is right in
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 32873ec..cf00eba 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -83,7 +83,7 @@
evsel = perf_evlist__first(evlist);
evsel->attr.task = 1;
- evsel->attr.sample_freq = 0;
+ evsel->attr.sample_freq = 1;
evsel->attr.inherit = 0;
evsel->attr.watermark = 0;
evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 6318596..5773638 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -34,6 +34,7 @@
int (*get_nr)(void);
const char *(*get_desc)(int subtest);
} subtest;
+ bool (*is_supported)(void);
};
/* Tests */
@@ -99,6 +100,8 @@
int test__clang_subtest_get_nr(void);
int test__unit_number__scnprint(int subtest);
+bool test__bp_signal_is_supported(void);
+
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 683f834..ddbd56d 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -239,10 +239,20 @@
const char *s = strchr(ops->raw, '+');
const char *c = strchr(ops->raw, ',');
- if (c++ != NULL)
+ /*
+ * skip over possible up to 2 operands to get to address, e.g.:
+ * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+ */
+ if (c++ != NULL) {
ops->target.addr = strtoull(c, NULL, 16);
- else
+ if (!ops->target.addr) {
+ c = strchr(c, ',');
+ if (c++ != NULL)
+ ops->target.addr = strtoull(c, NULL, 16);
+ }
+ } else {
ops->target.addr = strtoull(ops->raw, NULL, 16);
+ }
if (s++ != NULL) {
ops->target.offset = strtoull(s, NULL, 16);
@@ -257,10 +267,27 @@
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
+ const char *c = strchr(ops->raw, ',');
+
if (!ops->target.addr || ops->target.offset < 0)
return ins__raw_scnprintf(ins, bf, size, ops);
- return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+ if (c != NULL) {
+ const char *c2 = strchr(c + 1, ',');
+
+ /* check for 3-op insn */
+ if (c2 != NULL)
+ c = c2;
+ c++;
+
+ /* mirror arch objdump's space-after-comma style */
+ if (*c == ' ')
+ c++;
+ }
+
+ return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+ ins->name, c ? c - ops->raw : 0, ops->raw,
+ ops->target.offset);
}
static struct ins_ops jump_ops = {
@@ -1294,6 +1321,7 @@
char linkname[PATH_MAX];
char *build_id_filename;
char *build_id_path = NULL;
+ char *pos;
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso))
@@ -1313,7 +1341,14 @@
if (!build_id_path)
return -1;
- dirname(build_id_path);
+ /*
+ * The old-style build-id cache path ends in XX/XXXXXXX..,
+ * while the new style ends in XX/XXXXXXX../{elf,kallsyms,vdso}.
+ * Strip the trailing component (dirname) for the new style only,
+ * so that build_id_path points at the build-id directory itself.
+ */
+ pos = strrchr(build_id_path, '/');
+ if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
+ dirname(build_id_path);
if (dso__is_kcore(dso) ||
readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
@@ -1396,31 +1431,10 @@
sizeof(symfs_filename));
}
} else if (dso__needs_decompress(dso)) {
- char tmp[PATH_MAX];
- struct kmod_path m;
- int fd;
- bool ret;
+ char tmp[KMOD_DECOMP_LEN];
- if (kmod_path__parse_ext(&m, symfs_filename))
- goto out;
-
- snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
-
- fd = mkstemp(tmp);
- if (fd < 0) {
- free(m.ext);
- goto out;
- }
-
- ret = decompress_to_file(m.ext, symfs_filename, fd);
-
- if (ret)
- pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
-
- free(m.ext);
- close(fd);
-
- if (!ret)
+ if (dso__decompress_kmodule_path(dso, symfs_filename,
+ tmp, sizeof(tmp)) < 0)
goto out;
strcpy(symfs_filename, tmp);
@@ -1429,7 +1443,7 @@
snprintf(command, sizeof(command),
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand",
+ " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
objdump_path ? objdump_path : "objdump",
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
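The operand skipping in jump__parse() above handles AArch64-style jumps whose target address is not the first operand. A self-contained illustration of the same parsing idea, under the assumption that the address is the last comma-separated hexadecimal operand (a sketch, not the perf implementation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* illustrative only: parse the branch target from a raw operand string */
    static unsigned long long parse_jump_target(const char *raw)
    {
        const char *c = strchr(raw, ',');
        unsigned long long addr;

        if (!c)                             /* single operand: "3ccf08" */
            return strtoull(raw, NULL, 16);

        addr = strtoull(++c, NULL, 16);     /* try the second operand */
        if (!addr) {                        /* e.g. "#26" -> skip to the next one */
            c = strchr(c, ',');
            if (c)
                addr = strtoull(++c, NULL, 16);
        }
        return addr;
    }

    int main(void)
    {
        printf("%llx\n", parse_jump_target("w0, #26, ffff0000083cd190"));
        printf("%llx\n", parse_jump_target("3ccf08"));
        return 0;
    }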
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 168cc49..e0148b0 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -278,51 +278,6 @@
return bf;
}
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
-{
- char *id_name = NULL, *ch;
- struct stat sb;
- char sbuild_id[SBUILD_ID_SIZE];
-
- if (!dso->has_build_id)
- goto err;
-
- build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
- id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
- if (!id_name)
- goto err;
- if (access(id_name, F_OK))
- goto err;
- if (lstat(id_name, &sb) == -1)
- goto err;
- if ((size_t)sb.st_size > size - 1)
- goto err;
- if (readlink(id_name, bf, size - 1) < 0)
- goto err;
-
- bf[sb.st_size] = '\0';
-
- /*
- * link should be:
- * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
- */
- ch = strrchr(bf, '/');
- if (!ch)
- goto err;
- if (ch - 3 < bf)
- goto err;
-
- free(id_name);
- return strncmp(".ko", ch - 3, 3) == 0;
-err:
- pr_err("Invalid build id: %s\n", id_name ? :
- dso->long_name ? :
- dso->short_name ? :
- "[unknown]");
- free(id_name);
- return false;
-}
-
#define dsos__for_each_with_build_id(pos, head) \
list_for_each_entry(pos, head, node) \
if (!pos->has_build_id) \
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8a89b19..96690a5 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -17,7 +17,6 @@
size_t size);
char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index a96a99d..4e7ab61 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -248,6 +248,64 @@
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
+static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
+{
+ int fd = -1;
+ struct kmod_path m;
+
+ if (!dso__needs_decompress(dso))
+ return -1;
+
+ if (kmod_path__parse_ext(&m, dso->long_name))
+ return -1;
+
+ if (!m.comp)
+ goto out;
+
+ fd = mkstemp(tmpbuf);
+ if (fd < 0) {
+ dso->load_errno = errno;
+ goto out;
+ }
+
+ if (!decompress_to_file(m.ext, name, fd)) {
+ dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
+ close(fd);
+ fd = -1;
+ }
+
+out:
+ free(m.ext);
+ return fd;
+}
+
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
+{
+ char tmpbuf[] = KMOD_DECOMP_NAME;
+ int fd;
+
+ fd = decompress_kmodule(dso, name, tmpbuf);
+ unlink(tmpbuf);
+ return fd;
+}
+
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+ char *pathname, size_t len)
+{
+ char tmpbuf[] = KMOD_DECOMP_NAME;
+ int fd;
+
+ fd = decompress_kmodule(dso, name, tmpbuf);
+ if (fd < 0) {
+ unlink(tmpbuf);
+ return -1;
+ }
+
+ strncpy(pathname, tmpbuf, len);
+ close(fd);
+ return 0;
+}
+
/*
* Parses kernel module specified in @path and updates
* @m argument like:
@@ -335,6 +393,21 @@
return 0;
}
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+ struct machine *machine)
+{
+ if (machine__is_host(machine))
+ dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+ else
+ dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+ /* _KMODULE_COMP should be next to _KMODULE */
+ if (m->kmod && m->comp)
+ dso->symtab_type++;
+
+ dso__set_short_name(dso, strdup(m->name), true);
+}
+
/*
* Global list of open DSOs and the counter.
*/
@@ -381,7 +454,7 @@
static int __open_dso(struct dso *dso, struct machine *machine)
{
- int fd;
+ int fd = -EINVAL;
char *root_dir = (char *)"";
char *name = malloc(PATH_MAX);
@@ -392,15 +465,30 @@
root_dir = machine->root_dir;
if (dso__read_binary_type_filename(dso, dso->binary_type,
- root_dir, name, PATH_MAX)) {
- free(name);
- return -EINVAL;
- }
+ root_dir, name, PATH_MAX))
+ goto out;
if (!is_regular_file(name))
- return -EINVAL;
+ goto out;
+
+ if (dso__needs_decompress(dso)) {
+ char newpath[KMOD_DECOMP_LEN];
+ size_t len = sizeof(newpath);
+
+ if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
+ fd = -dso->load_errno;
+ goto out;
+ }
+
+ strcpy(name, newpath);
+ }
fd = do_open(name);
+
+ if (dso__needs_decompress(dso))
+ unlink(name);
+
+out:
free(name);
return fd;
}
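The two new helpers differ only in what they hand back: dso__decompress_kmodule_fd() returns a descriptor to an already-unlinked temporary, while dso__decompress_kmodule_path() returns a pathname that the caller must unlink when done (as the __open_dso() and code-reading changes do). A standalone sketch of that split, with a placeholder fake_decompress() standing in for decompress_to_file():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define DECOMP_NAME "/tmp/perf-kmod-XXXXXX"

    /* placeholder: the real helper would inflate the module into 'fd' */
    static int fake_decompress(int fd)
    {
        return write(fd, "decompressed\n", 13) == 13 ? 0 : -1;
    }

    /* fd variant: the name is unlinked at once, the open fd keeps the data */
    static int decomp_fd(void)
    {
        char tmp[] = DECOMP_NAME;
        int fd = mkstemp(tmp);

        if (fd < 0)
            return -1;
        unlink(tmp);            /* anonymous from now on; freed on close() */
        if (fake_decompress(fd)) {
            close(fd);
            return -1;
        }
        return fd;
    }

    /* path variant: the caller gets a real pathname and must unlink it */
    static int decomp_path(char *path, size_t len)
    {
        char tmp[] = DECOMP_NAME;
        int fd = mkstemp(tmp);

        if (fd < 0 || fake_decompress(fd)) {
            if (fd >= 0) {
                close(fd);
                unlink(tmp);
            }
            return -1;
        }
        close(fd);
        strncpy(path, tmp, len);
        return 0;
    }

    int main(void)
    {
        char path[sizeof(DECOMP_NAME)];
        int fd = decomp_fd();

        if (fd >= 0)
            close(fd);
        if (!decomp_path(path, sizeof(path))) {
            printf("decompressed copy at %s\n", path);
            unlink(path);       /* callers like the objdump users clean up */
        }
        return 0;
    }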
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 12350b1..bd061ba 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -244,6 +244,12 @@
bool is_kernel_module(const char *pathname, int cpumode);
bool decompress_to_file(const char *ext, const char *filename, int output_fd);
bool dso__needs_decompress(struct dso *dso);
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+ char *pathname, size_t len);
+
+#define KMOD_DECOMP_NAME "/tmp/perf-kmod-XXXXXX"
+#define KMOD_DECOMP_LEN sizeof(KMOD_DECOMP_NAME)
struct kmod_path {
char *name;
@@ -259,6 +265,9 @@
#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
#define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true)
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+ struct machine *machine);
+
/*
* The dso__data_* external interface provides following functions:
* dso__data_get_fd
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4f7902..cda44b0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -273,8 +273,20 @@
struct perf_evsel *evsel;
event_attr_init(&attr);
+ /*
+ * sample_period is in an unnamed union, which older compilers
+ * such as gcc 4.4.7 do not support as a named struct-member
+ * initializer, so set it here instead.
+ *
+ * It is needed just for probing the precise_ip level:
+ */
+ attr.sample_period = 1;
perf_event_attr__set_max_precise_ip(&attr);
+ /*
+ * Now let the usual logic that sets up the perf_event_attr defaults
+ * kick in when we return, before perf_evsel__open() is called.
+ */
+ attr.sample_period = 0;
evsel = perf_evsel__new(&attr);
if (evsel == NULL)
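Setting sample_period = 1 only around the probe matters because perf_event_attr__set_max_precise_ip() has to actually open the event to see which precise_ip level the PMU accepts. A rough standalone approximation of such a probe (it mimics the idea, it is not perf's helper):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Probe the highest precise_ip the kernel/PMU accepts for this event. */
    static void set_max_precise_ip(struct perf_event_attr *attr)
    {
        attr->precise_ip = 3;

        while (attr->precise_ip != 0) {
            int fd = syscall(SYS_perf_event_open, attr, 0, -1, -1, 0);

            if (fd != -1) {
                close(fd);
                break;
            }
            attr->precise_ip--;
        }
    }

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 1;         /* needed only while probing */
        attr.exclude_kernel = 1;

        set_max_precise_ip(&attr);
        return attr.precise_ip;         /* 0..3 */
    }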
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 314a071..b5baff3 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -841,7 +841,7 @@
/*
* default get_cpuid(): nothing gets recorded
- * actual implementation must be in arch/$(ARCH)/util/header.c
+ * actual implementation must be in arch/$(SRCARCH)/util/header.c
*/
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
@@ -1469,8 +1469,16 @@
dso__set_build_id(dso, &bev->build_id);
- if (!is_kernel_module(filename, cpumode))
- dso->kernel = dso_type;
+ if (dso_type != DSO_TYPE_USER) {
+ struct kmod_path m = { .name = NULL, };
+
+ if (!kmod_path__parse_name(&m, filename) && m.kmod)
+ dso__set_module_info(dso, &m, machine);
+ else
+ dso->kernel = dso_type;
+
+ free(m.name);
+ }
build_id__sprintf(dso->build_id, sizeof(dso->build_id),
sbuild_id);
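With this change a build-id event for a module file is classified by its name rather than unconditionally marked as kernel. A crude standalone approximation of that name-based check (the real kmod_path parser also extracts the module name and compression extension):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* crude check: does the basename look like foo.ko or foo.ko.<comp>? */
    static bool looks_like_kmod(const char *path)
    {
        static const char * const suffixes[] = { ".ko", ".ko.gz", ".ko.xz" };
        const char *base = strrchr(path, '/');
        unsigned i;

        base = base ? base + 1 : path;
        for (i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); i++) {
            size_t blen = strlen(base), slen = strlen(suffixes[i]);

            if (blen > slen && !strcmp(base + blen - slen, suffixes[i]))
                return true;
        }
        return false;
    }

    int main(void)
    {
        printf("%d\n", looks_like_kmod("/lib/modules/x/nf_nat.ko.xz")); /* 1 */
        printf("%d\n", looks_like_kmod("[kernel.kallsyms]"));           /* 0 */
        return 0;
    }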
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index d97e014..d7f31cb 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -572,16 +572,7 @@
if (dso == NULL)
goto out_unlock;
- if (machine__is_host(machine))
- dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
- else
- dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
-
- /* _KMODULE_COMP should be next to _KMODULE */
- if (m->kmod && m->comp)
- dso->symtab_type++;
-
- dso__set_short_name(dso, strdup(m->name), true);
+ dso__set_module_info(dso, m, machine);
dso__set_long_name(dso, strdup(filename), true);
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9d92af7..40de3cb 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1219,7 +1219,7 @@
fprintf(ofp, "# be retrieved using Python functions of the form "
"common_*(context).\n");
- fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+ fprintf(ofp, "# See the perf-script-python Documentation for the list "
"of available functions.\n\n");
fprintf(ofp, "import os\n");
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index e7ee47f..502505c 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -637,43 +637,6 @@
return 0;
}
-static int decompress_kmodule(struct dso *dso, const char *name,
- enum dso_binary_type type)
-{
- int fd = -1;
- char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
- struct kmod_path m;
-
- if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
- type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
- type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
- return -1;
-
- if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
- name = dso->long_name;
-
- if (kmod_path__parse_ext(&m, name) || !m.comp)
- return -1;
-
- fd = mkstemp(tmpbuf);
- if (fd < 0) {
- dso->load_errno = errno;
- goto out;
- }
-
- if (!decompress_to_file(m.ext, name, fd)) {
- dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
- close(fd);
- fd = -1;
- }
-
- unlink(tmpbuf);
-
-out:
- free(m.ext);
- return fd;
-}
-
bool symsrc__possibly_runtime(struct symsrc *ss)
{
return ss->dynsym || ss->opdsec;
@@ -705,9 +668,11 @@
int fd;
if (dso__needs_decompress(dso)) {
- fd = decompress_kmodule(dso, name, type);
+ fd = dso__decompress_kmodule_fd(dso, name);
if (fd < 0)
return -1;
+
+ type = dso->symtab_type;
} else {
fd = open(name, O_RDONLY);
if (fd < 0) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8f2b068..e7a98db 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1562,10 +1562,6 @@
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
- if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
- if (dso__build_id_is_kmod(dso, name, PATH_MAX))
- kmod = true;
-
if (syms_ss)
ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
else
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 943a062..7755a5e0 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -39,6 +39,14 @@
return 0;
mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (mod) {
+ Dwarf_Addr s;
+
+ dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+ if (s != al->map->start)
+ mod = 0;
+ }
+
if (!mod)
mod = dwfl_report_elf(ui->dwfl, dso->short_name,
dso->long_name, -1, al->map->start,
@@ -170,6 +178,14 @@
Dwarf_Addr pc;
bool isactivation;
+ if (!dwfl_frame_pc(state, &pc, NULL)) {
+ pr_err("%s", dwfl_errmsg(-1));
+ return DWARF_CB_ABORT;
+ }
+
+ /* report the module before we query for isactivation */
+ report_module(pc, ui);
+
if (!dwfl_frame_pc(state, &pc, &isactivation)) {
pr_err("%s", dwfl_errmsg(-1));
return DWARF_CB_ABORT;
@@ -224,7 +240,7 @@
err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
- if (err && !ui->max_stack)
+ if (err && ui->max_stack != max_stack)
err = 0;
/*
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h
index 19d0604..487cbfb 100644
--- a/tools/testing/selftests/bpf/bpf_endian.h
+++ b/tools/testing/selftests/bpf/bpf_endian.h
@@ -1,23 +1,42 @@
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__
-#include <asm/byteorder.h>
+#include <linux/swab.h>
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-# define __bpf_ntohs(x) __builtin_bswap16(x)
-# define __bpf_htons(x) __builtin_bswap16(x)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-# define __bpf_ntohs(x) (x)
-# define __bpf_htons(x) (x)
+/* LLVM's BPF target selects the endianness of the CPU
+ * it compiles on, or the one the user explicitly requests
+ * (bpfel/bpfeb). The __BYTE_ORDER__ used here is defined
+ * by the compiler; we cannot rely on __BYTE_ORDER from
+ * libc headers, since it doesn't reflect the actually
+ * requested byte order.
+ *
+ * Note that LLVM's BPF target has different __builtin_bswapX()
+ * semantics: it maps to BPF_ALU | BPF_END | BPF_TO_BE in both
+ * the bpfel and the bpfeb case, which means that below we in
+ * effect map to cpu_to_be16(). We could use it unconditionally
+ * in the BPF case, but it is better not to rely on that, so
+ * that this header can be used from both the application and
+ * the BPF program side, which use different targets.
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define __bpf_ntohs(x) __builtin_bswap16(x)
+# define __bpf_htons(x) __builtin_bswap16(x)
+# define __bpf_constant_ntohs(x) ___constant_swab16(x)
+# define __bpf_constant_htons(x) ___constant_swab16(x)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define __bpf_ntohs(x) (x)
+# define __bpf_htons(x) (x)
+# define __bpf_constant_ntohs(x) (x)
+# define __bpf_constant_htons(x) (x)
#else
-# error "Fix your __BYTE_ORDER?!"
+# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
#define bpf_htons(x) \
(__builtin_constant_p(x) ? \
- __constant_htons(x) : __bpf_htons(x))
+ __bpf_constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x) \
(__builtin_constant_p(x) ? \
- __constant_ntohs(x) : __bpf_ntohs(x))
+ __bpf_constant_ntohs(x) : __bpf_ntohs(x))
-#endif
+#endif /* __BPF_ENDIAN__ */
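The split between __bpf_constant_*() and __bpf_*() matters because ___constant_swab16() is usable in constant expressions and folds at compile time, while __builtin_bswap16() is evaluated at run time. A host-side sketch of the same __builtin_constant_p() dispatch, using a local swab macro instead of linux/swab.h:

    #include <stdint.h>
    #include <stdio.h>

    /* compile-time byte swap, usable in constant expressions */
    #define const_swab16(x) ((uint16_t)((((x) & 0x00ffU) << 8) | \
                                        (((x) & 0xff00U) >> 8)))

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    # define my_htons(x)                            \
        (__builtin_constant_p(x) ?                  \
         const_swab16(x) : __builtin_bswap16(x))
    #else
    # define my_htons(x) (x)
    #endif

    int main(void)
    {
        uint16_t port = 8080;

        /* 0x5000 == htons(80): the constant form folds at compile time */
        printf("%#x %#x\n", my_htons(80), my_htons(port));
        return 0;
    }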
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 32c3295..8794036 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -22,7 +22,7 @@
#include <asm/kvm_hyp.h>
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
-#define vtr_to_nr_pre_bits(v) (((u32)(v) >> 26) + 1)
+#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
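ICH_VTR_EL2.PREbits is a 3-bit field (bits 28:26), so shifting alone also pulls in PRIbits and the other high bits once those are non-zero; the added mask isolates the field. A tiny illustration with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    #define vtr_to_nr_pre_bits_old(v)   (((uint32_t)(v) >> 26) + 1)
    #define vtr_to_nr_pre_bits_new(v)   ((((uint32_t)(v) >> 26) & 7) + 1)

    int main(void)
    {
        /* hypothetical ICH_VTR value: PRIbits (bits 31:29) = 4, PREbits = 4 */
        uint32_t vtr = (4u << 29) | (4u << 26);

        printf("old: %u  new: %u\n",
               vtr_to_nr_pre_bits_old(vtr),    /* 37: PRIbits leak in */
               vtr_to_nr_pre_bits_new(vtr));   /* 5 */
        return 0;
    }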
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a2d6324..e2e5eff 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -879,6 +879,9 @@
pmd_t *pmd;
pud = stage2_get_pud(kvm, cache, addr);
+ if (!pud)
+ return NULL;
+
if (stage2_pud_none(*pud)) {
if (!cache)
return NULL;
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 0a4283e..63e0bbd 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -226,7 +226,13 @@
switch (addr & 0xff) {
case GIC_CPU_CTRL:
- val = vmcr.ctlr;
+ val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+ val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+ val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+ val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+ val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+ val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
break;
case GIC_CPU_PRIMASK:
/*
@@ -267,7 +273,13 @@
switch (addr & 0xff) {
case GIC_CPU_CTRL:
- vmcr.ctlr = val;
+ vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+ vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+ vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+ vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+ vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+ vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
break;
case GIC_CPU_PRIMASK:
/*
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 504b4bd..e4187e5 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -177,7 +177,18 @@
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
u32 vmcr;
- vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+ vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+ GICH_VMCR_ENABLE_GRP0_MASK;
+ vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+ GICH_VMCR_ENABLE_GRP1_MASK;
+ vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+ GICH_VMCR_ACK_CTL_MASK;
+ vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+ GICH_VMCR_FIQ_EN_MASK;
+ vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+ GICH_VMCR_CBPR_MASK;
+ vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+ GICH_VMCR_EOI_MODE_MASK;
vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
GICH_VMCR_ALIAS_BINPOINT_MASK;
vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -195,8 +206,19 @@
vmcr = cpu_if->vgic_vmcr;
- vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
- GICH_VMCR_CTRL_SHIFT;
+ vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+ GICH_VMCR_ENABLE_GRP0_SHIFT;
+ vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+ GICH_VMCR_ENABLE_GRP1_SHIFT;
+ vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+ GICH_VMCR_ACK_CTL_SHIFT;
+ vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+ GICH_VMCR_FIQ_EN_SHIFT;
+ vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+ GICH_VMCR_CBPR_SHIFT;
+ vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+ GICH_VMCR_EOI_MODE_SHIFT;
+
vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
GICH_VMCR_ALIAS_BINPOINT_SHIFT;
vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
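Both the GICC_CTLR emulation above and these GICH_VMCR accessors follow the same pack/unpack idiom: shift-and-or to assemble the register, mask-and-shift (or !!()) to take it apart. A standalone sketch with invented shift values, just to show the shape of the code:

    #include <stdint.h>
    #include <stdio.h>

    /* example bit layout, not the real GICC_CTLR/GICH_VMCR encoding */
    #define EN_GRP0_SHIFT   0
    #define EN_GRP1_SHIFT   1
    #define ACKCTL_SHIFT    2

    struct vmcr_fields {
        uint32_t grpen0, grpen1, ackctl;
    };

    static uint32_t pack(const struct vmcr_fields *f)
    {
        return (f->grpen0 << EN_GRP0_SHIFT) |
               (f->grpen1 << EN_GRP1_SHIFT) |
               (f->ackctl << ACKCTL_SHIFT);
    }

    static void unpack(uint32_t val, struct vmcr_fields *f)
    {
        f->grpen0 = !!(val & (1u << EN_GRP0_SHIFT));
        f->grpen1 = !!(val & (1u << EN_GRP1_SHIFT));
        f->ackctl = !!(val & (1u << ACKCTL_SHIFT));
    }

    int main(void)
    {
        struct vmcr_fields f = { .grpen0 = 1, .grpen1 = 1, .ackctl = 0 }, g;
        uint32_t reg = pack(&f);

        unpack(reg, &g);
        printf("reg=%#x grpen0=%u grpen1=%u ackctl=%u\n",
               reg, g.grpen0, g.grpen1, g.ackctl);
        return 0;
    }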
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6fe3f00..030248e 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -159,15 +159,24 @@
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
u32 vmcr;
- /*
- * Ignore the FIQen bit, because GIC emulation always implies
- * SRE=1 which means the vFIQEn bit is also RES1.
- */
- vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
- ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
- vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+ ICH_VMCR_ACK_CTL_MASK;
+ vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+ ICH_VMCR_FIQ_EN_MASK;
+ } else {
+ /*
+ * When emulating GICv3 on GICv3 with SRE=1, the
+ * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+ */
+ vmcr = ICH_VMCR_FIQ_EN_MASK;
+ }
+
+ vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+ vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -180,17 +189,27 @@
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
u32 vmcr;
vmcr = cpu_if->vgic_vmcr;
- /*
- * Ignore the FIQen bit, because GIC emulation always implies
- * SRE=1 which means the vFIQEn bit is also RES1.
- */
- vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
- ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
- vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+ ICH_VMCR_ACK_CTL_SHIFT;
+ vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+ ICH_VMCR_FIQ_EN_SHIFT;
+ } else {
+ /*
+ * When emulating GICv3 on GICv3 with SRE=1, the
+ * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+ */
+ vmcrp->fiqen = 1;
+ vmcrp->ackctl = 0;
+ }
+
+ vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+ vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index da83e4c..bba7fa2 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -111,14 +111,18 @@
* registers regardless of the hardware backed GIC used.
*/
struct vgic_vmcr {
- u32 ctlr;
+ u32 grpen0;
+ u32 grpen1;
+
+ u32 ackctl;
+ u32 fiqen;
+ u32 cbpr;
+ u32 eoim;
+
u32 abpr;
u32 bpr;
u32 pmr; /* Priority mask field in the GICC_PMR and
* ICC_PMR_EL1 priority field format */
- /* Below member variable are valid only for GICv3 */
- u32 grpen0;
- u32 grpen1;
};
struct vgic_reg_attr {