Merge tag 'v3.11-rc7' into stable/for-linus-3.12
Linux 3.11-rc7
We need the git commit below, which this merge brings in:

commit 28817e9de4f039a1a8c1fe1df2fa2df524626b9e
Author: Chuck Anderson <chuck.anderson@oracle.com>
Date:   Tue Aug 6 15:12:19 2013 -0700

    xen/smp: initialize IPI vectors before marking CPU online
* tag 'v3.11-rc7': (443 commits)
Linux 3.11-rc7
ARC: [lib] strchr breakage in Big-endian configuration
VFS: collect_mounts() should return an ERR_PTR
bfs: iget_locked() doesn't return an ERR_PTR
efs: iget_locked() doesn't return an ERR_PTR()
proc: kill the extra proc_readfd_common()->dir_emit_dots()
cope with potentially long ->d_dname() output for shmem/hugetlb
usb: phy: fix build breakage
USB: OHCI: add missing PCI PM callbacks to ohci-pci.c
staging: comedi: bug-fix NULL pointer dereference on failed attach
lib/lz4: correct the LZ4 license
memcg: get rid of swapaccount leftovers
nilfs2: fix issue with counting number of bio requests for BIO_EOPNOTSUPP error detection
nilfs2: remove double bio_put() in nilfs_end_bio_write() for BIO_EOPNOTSUPP error
drivers/platform/olpc/olpc-ec.c: initialise earlier
ipv4: expose IPV4_DEVCONF
ipv6: handle Redirect ICMP Message with no Redirected Header option
be2net: fix disabling TX in be_close()
Revert "ACPI / video: Always call acpi_video_init_brightness() on init"
Revert "genetlink: fix family dump race"
...
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b715..9c92bb8 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
<?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
<!ENTITY media-indices SYSTEM "./media-indices.tmpl">
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681..6113f92 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -4,7 +4,7 @@
Required properties :
- reg : Offset and length of the register set for the device
- - compatible : Should be "marvell,mv64xxx-i2c"
+ - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
- interrupts : The interrupt number
Optional properties :
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a3086..30b0581 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -31,9 +31,8 @@
Optional sub-node properties:
ti,warm-reset - maintain voltage during warm reset(boolean)
ti,roof-floor - control voltage selection by pin(boolean)
- ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
+ ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
2 - eco, 3 - forced pwm
- ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
ti,smps-range - OTP has the wrong range set for the hardware so override
0 - low range, 1 - high range.
@@ -59,7 +58,6 @@
ti,warm-reset;
ti,roof-floor;
ti,mode-sleep = <0>;
- ti,tstep = <0>;
ti,smps-range = <1>;
};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356ac..7f9d4f5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2953,7 +2953,7 @@
improve throughput, but will also increase the
amount of memory reserved for use by the client.
- swapaccount[=0|1]
+ swapaccount=[0|1]
[KNL] Enable accounting of swap in memory resource
controller if no parameter or 1 is given or disable
it if 0 is given (See Documentation/cgroups/memory.txt)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0161dad..94aa87dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -965,6 +965,12 @@
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-keystone/
+
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1265,6 @@
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/Ux500 ARM ARCHITECTURE
-M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -5576,9 +5581,9 @@
F: drivers/media/tuners/mxl5007t.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Andrew Gallatin <gallatin@myri.com>
+M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org
-W: http://www.myri.com/scs/download-Myri10GE.html
+W: https://www.myricom.com/support/downloads/myri10ge.html
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
@@ -5879,7 +5884,7 @@
F: include/linux/i2c-omap.h
OMAP DEVICE TREE SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org
@@ -5959,14 +5964,14 @@
F: drivers/char/hw_random/omap-rng.c
OMAP HWMOD SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -7361,7 +7366,6 @@
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
-M: Robin Holt <holt@sgi.com>
S: Maintained
F: drivers/misc/sgi-gru/
@@ -7381,7 +7385,8 @@
F: Documentation/sgi-visws.txt
SGI XP/XPC/XPNET DRIVER
-M: Robin Holt <holt@sgi.com>
+M: Cliff Whickman <cpw@sgi.com>
+M: Robin Holt <robinmholt@gmail.com>
S: Maintained
F: drivers/misc/sgi-xp/
@@ -8664,6 +8669,11 @@
S: Maintained
F: sound/usb/midi.*
+USB NETWORKING DRIVERS
+L: linux-usb@vger.kernel.org
+S: Odd Fixes
+F: drivers/net/usb/
+
USB OHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
index f93d4f7..369882e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
NAME = Linux for Workgroups
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24..1feb169 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@
help
Architecture has the first two arguments of clone(2) swapped.
+config CLONE_BACKWARDS3
+ bool
+ help
+ Architecture has tls passed as the 3rd argument of clone(2),
+ not the 5th one.
+
config ODD_RT_SIGACTION
bool
help
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c1047..9c548c7 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
@@ -95,6 +104,7 @@
lsr r7,r7,7
bic r2,r7,r6
+.Lfound_char_b:
norm r2,r2
sub_s r0,r0,4
asr_s r2,r2,3
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c..3d77dbe 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
chosen {
- bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
};
memory {
- reg = <0x20000000 0x10000000>;
+ reg = <0x20000000 0x8000000>;
};
clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855..49e3c45 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
usb0: ohci@00600000 {
status = "okay";
- num-ports = <2>;
- atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+ num-ports = <3>;
+ atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+ &pioD 19 GPIO_ACTIVE_LOW
&pioD 20 GPIO_ACTIVE_LOW
>;
};
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c..9c1167b0 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
cpu-offset = <0x80000>;
};
- msmgpio: gpio@fd510000 {
+ msmgpio: gpio@800000 {
compatible = "qcom,msm-gpio";
gpio-controller;
#gpio-cells = <2>;
@@ -34,7 +34,7 @@
interrupts = <0 32 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- reg = <0xfd510000 0x4000>;
+ reg = <0x800000 0x4000>;
};
serial@16440000 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b7267..65d7b60 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
};
&mmc1 {
- vmmc-supply = <&vmmcsd_fixed>;
+ vmmc-supply = <&ldo9_reg>;
bus-width = <4>;
};
@@ -282,6 +282,7 @@
regulators {
smps123_reg: smps123 {
+ /* VDD_OPP_MPU */
regulator-name = "smps123";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
};
smps45_reg: smps45 {
+ /* VDD_OPP_MM */
regulator-name = "smps45";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
};
smps6_reg: smps6 {
+ /* VDD_DDR3 - over VDD_SMPS6 */
regulator-name = "smps6";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
};
smps7_reg: smps7 {
+ /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
regulator-name = "smps7";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
};
smps8_reg: smps8 {
+ /* VDD_OPP_CORE */
regulator-name = "smps8";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
};
smps9_reg: smps9 {
+ /* VDDA_2v1_AUD over VDD_2v1 */
regulator-name = "smps9";
regulator-min-microvolt = <2100000>;
regulator-max-microvolt = <2100000>;
- regulator-always-on;
- regulator-boot-on;
ti,smps-range = <0x80>;
};
smps10_reg: smps10 {
+ /* VBUS_5V_OTG */
regulator-name = "smps10";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
};
ldo1_reg: ldo1 {
+ /* VDDAPHY_CAM: vdda_csiport */
regulator-name = "ldo1";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
};
ldo2_reg: ldo2 {
+ /* VCC_2V8_DISP: Does not go anywhere */
regulator-name = "ldo2";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ /* Unused */
+ status = "disabled";
};
ldo3_reg: ldo3 {
+ /* VDDAPHY_MDM: vdda_lli */
regulator-name = "ldo3";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
regulator-boot-on;
+ /* Only if Modem is used */
+ status = "disabled";
};
ldo4_reg: ldo4 {
+ /* VDDAPHY_DISP: vdda_dsiport/hdmi */
regulator-name = "ldo4";
- regulator-min-microvolt = <2200000>;
- regulator-max-microvolt = <2200000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
};
ldo5_reg: ldo5 {
+ /* VDDA_1V8_PHY: usb/sata/hdmi.. */
regulator-name = "ldo5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
};
ldo6_reg: ldo6 {
+ /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
regulator-name = "ldo6";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
regulator-always-on;
regulator-boot-on;
};
ldo7_reg: ldo7 {
+ /* VDD_VPP: vpp1 */
regulator-name = "ldo7";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ /* Only for efuse reprograming! */
+ status = "disabled";
};
ldo8_reg: ldo8 {
+ /* VDD_3v0: Does not go anywhere */
regulator-name = "ldo8";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
regulator-boot-on;
+ /* Unused */
+ status = "disabled";
};
ldo9_reg: ldo9 {
+ /* VCC_DV_SDIO: vdds_sdcard */
regulator-name = "ldo9";
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
+ regulator-max-microvolt = <3000000>;
regulator-boot-on;
};
ldoln_reg: ldoln {
+ /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
};
ldousb_reg: ldousb {
+ /* VDDA_3V_USB: VDDA_USBHS33 */
regulator-name = "ldousb";
regulator-min-microvolt = <3250000>;
regulator-max-microvolt = <3250000>;
regulator-always-on;
regulator-boot-on;
};
+
+ regen3_reg: regen3 {
+ /* REGEN3 controls LDO9 supply to card */
+ regulator-name = "regen3";
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403..f5b9898 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
+ device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
};
cpu@1 {
+ device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
};
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2..5592be6 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
};
usb-phy@c5004000 {
+ status = "okay";
nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b..40e6fb2 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -830,6 +830,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 24 0>; /* PD0 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901..37c93d3 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -412,6 +412,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 170 0>; /* PV2 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94..a3d0eba 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -588,6 +588,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+ regulator-always-on;
+ regulator-boot-on;
};
vbus3_reg: regulator@3 {
@@ -598,6 +600,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a72..a252c0b 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@
{
return 1 << mpidr_hash.bits;
}
+
+extern int platform_can_cpu_hotplug(void);
+
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965..b07c09e 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@
" subs %1, %0, %0, ror #16\n"
" addeq %0, %0, %4\n"
" strexeq %2, %0, [%3]"
- : "=&r" (slock), "=&r" (contended), "=r" (res)
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
} while (res);
@@ -168,17 +168,20 @@
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
- if (tmp == 0) {
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -254,18 +257,26 @@
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
- smp_mb();
- return tmp2 == 0;
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
/* read_can_lock - would read_trylock() succeed? */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb..0baf7f0 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -107,10 +108,12 @@
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef..9cbe70c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@
.endm
.macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f4..918875d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,14 @@
void set_fiq_handler(void *start, unsigned int length)
{
-#if defined(CONFIG_CPU_USE_DOMAINS)
- void *base = (void *)0xffff0000;
-#else
void *base = vectors_page;
-#endif
unsigned offset = FIQ_OFFSET;
memcpy(base + offset, start, length);
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset, offset +
+ length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
- if (!vectors_high())
- flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c..57221e3 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
+#include <asm/smp_plat.h>
#include <asm/system_misc.h>
extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@
int i, err;
/*
+ * Validate that if the current HW supports SMP, then the SW supports
+ * and implements CPU hotplug for the current HW. If not, we won't be
+ * able to kexec reliably, so fail the prepare operation.
+ */
+ if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+ return -EINVAL;
+
+ /*
* No segment at default ATAGs address. try to locate
* a dtb using magic.
*/
@@ -73,6 +82,7 @@
crash_save_cpu(&regs, smp_processor_id());
flush_cache_all();
+ set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
while (1)
cpu_relax();
@@ -134,10 +144,13 @@
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
- if (num_online_cpus() > 1) {
- pr_err("kexec: error: multiple CPUs still online\n");
- return;
- }
+ /*
+ * This can only happen if machine_shutdown() failed to disable some
+ * CPU, and that can only happen if the checks in
+ * machine_kexec_prepare() were not correct. If this fails, we can't
+ * reliably kexec anyway, so BUG_ON is appropriate.
+ */
+ BUG_ON(num_online_cpus() > 1);
page_list = image->head & PAGE_MASK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4..e186ee1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -253,6 +258,9 @@
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85f..94f6b05 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@
{
return in_gate_area(NULL, addr);
}
-#define is_gate_vma(vma) ((vma) = &gate_vma)
+#define is_gate_vma(vma) ((vma) == &gate_vma)
#else
#define is_gate_vma(vma) 0
#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f..2dc19349e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@
return -ENOSYS;
}
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (smp_ops.cpu_kill)
+ return 1;
+#endif
+
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a51990..db9cf69 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@
NULL, reset_unknown, c0_CSSELR },
/* TTBR0/TTBR1: swapped by interrupt.S. */
- { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
- { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+ { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
/* TTBCR: swapped by interrupt.S. */
{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@
NULL, reset_unknown, c6_IFAR },
/* PAR swapped by interrupt.S */
- { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
/*
* DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@
| KVM_REG_ARM_OPC1_MASK))
return false;
params->is_64bit = true;
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ /* CRm to CRn: see cp15_to_index for details */
+ params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
>> KVM_REG_ARM_CRM_SHIFT);
params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
>> KVM_REG_ARM_OPC1_SHIFT);
params->Op2 = 0;
- params->CRn = 0;
+ params->CRm = 0;
return true;
default:
return false;
@@ -898,7 +903,14 @@
if (reg->is_64) {
val |= KVM_REG_SIZE_U64;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ /*
+ * CRn always denotes the primary coproc. reg. nr. for the
+ * in-kernel representation, but the user space API uses the
+ * CRm for the encoding, because it is modelled after the
+ * MRRC/MCRR instructions: see the ARM ARM rev. c page
+ * B3-1445
+ */
+ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
} else {
val |= KVM_REG_SIZE_U32;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3..0461d5c 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
+ if (i1->is_64 != i2->is_64)
+ return i2->is_64 - i1->is_64;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
+#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a..cf93472 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@
/*
* A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7..0c25d94 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
- unsigned long rt, len;
+ unsigned long rt;
+ int len;
bool is_write, sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4..0988d9e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@
return p;
}
+static bool page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@
put_page(virt_to_page(pmd));
}
-static bool pmd_empty(pmd_t *pmd)
-{
- struct page *pmd_page = virt_to_page(pmd);
- return page_count(pmd_page) == 1;
-}
-
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
if (pte_present(*pte)) {
@@ -118,12 +118,6 @@
}
}
-static bool pte_empty(pte_t *pte)
-{
- struct page *pte_page = virt_to_page(pte);
- return page_count(pte_page) == 1;
-}
-
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
unsigned long long start, u64 size)
{
@@ -132,37 +126,37 @@
pmd_t *pmd;
pte_t *pte;
unsigned long long addr = start, end = start + size;
- u64 range;
+ u64 next;
while (addr < end) {
pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
- addr += PUD_SIZE;
+ addr = pud_addr_end(addr, end);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
- addr += PMD_SIZE;
+ addr = pmd_addr_end(addr, end);
continue;
}
pte = pte_offset_kernel(pmd, addr);
clear_pte_entry(kvm, pte, addr);
- range = PAGE_SIZE;
+ next = addr + PAGE_SIZE;
/* If we emptied the pte, walk back up the ladder */
- if (pte_empty(pte)) {
+ if (page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
- range = PMD_SIZE;
- if (pmd_empty(pmd)) {
+ next = pmd_addr_end(addr, end);
+ if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr);
- range = PUD_SIZE;
+ next = pud_addr_end(addr, end);
}
}
- addr += range;
+ addr = next;
}
}
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee66..916e5a1 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@
CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc..139e42d 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW_SYNDROME,
+ .ecc_bits = 4,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a..fa4bfaf 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
.timing = &davinci_evm_nandflash_timing,
};
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5a..0c005e8 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.options = 0,
};
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112a..808233b 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@
.parts = davinci_ntosd2_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e..905efc8 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@
bool
config MSM_GPIOMUX
- depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
- bool "MSM V1 TLMM GPIOMUX architecture"
+ bool
help
Support for MSM V1 TLMM GPIOMUX architecture.
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2ab..0000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
- unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
- ((gpio & 0x3ff) << 4);
- unsigned tlmm_disable = 0;
- int rc;
-
- rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
- &tlmm_config, &tlmm_disable);
- if (rc)
- pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
- __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41..4410d77 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@
int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption. External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
#else
static inline int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87..827d150 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@
};
static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
.set_power = tusb_set_power,
.min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
.power = 100, /* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68e..773510556 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_PERIPHERAL,
+ .mode = MUSB_OTG,
.power = 0,
};
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeef..043e570 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -42,7 +42,7 @@
/* Using generic display panel */
static struct tfp410_platform_data omap4_dvi_panel = {
- .i2c_bus_num = 3,
+ .i2c_bus_num = 2,
.power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
};
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc9287..f99f68e 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@
struct device_node *node = pdev->dev.of_node;
const char *oh_name;
int oh_cnt, i, ret = 0;
+ bool device_active = false;
oh_cnt = of_property_count_strings(node, "ti,hwmods");
if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@
goto odbfd_exit1;
}
hwmods[i] = oh;
+ if (oh->flags & HWMOD_INIT_NO_IDLE)
+ device_active = true;
}
od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@
pdev->dev.pm_domain = &omap_device_pm_domain;
+ if (device_active) {
+ omap_device_enable(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ }
+
odbfd_exit1:
kfree(hwmods);
odbfd_exit:
@@ -842,6 +850,7 @@
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od = to_omap_device(pdev);
+ int i;
if (!od)
return 0;
@@ -850,6 +859,15 @@
* If omap_device state is enabled, but has no driver bound,
* idle it.
*/
+
+ /*
+ * Some devices (like memory controllers) are always kept
+ * enabled, and should not be idled even with no drivers.
+ */
+ for (i = 0; i < od->hwmods_cnt; i++)
+ if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+ return 0;
+
if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
dev_warn(dev, "%s: enabled but no driver. Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff..7f4db12 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@
np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
if (np)
- va_start = of_iomap(np, 0);
+ va_start = of_iomap(np, oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd..e1482a9 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@
#define MODULEMODE_HWCTRL 1
#define MODULEMODE_SWCTRL 2
+#define DEBUG_OMAP2UART1_FLAGS 0
+#define DEBUG_OMAP2UART2_FLAGS 0
+#define DEBUG_OMAP2UART3_FLAGS 0
+#define DEBUG_OMAP3UART3_FLAGS 0
+#define DEBUG_OMAP3UART4_FLAGS 0
+#define DEBUG_OMAP4UART3_FLAGS 0
+#define DEBUG_OMAP4UART4_FLAGS 0
+#define DEBUG_TI81XXUART1_FLAGS 0
+#define DEBUG_TI81XXUART2_FLAGS 0
+#define DEBUG_TI81XXUART3_FLAGS 0
+#define DEBUG_AM33XXUART1_FLAGS 0
+
+#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
/**
* struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@
* @voltdm: pointer to voltage domain (filled in at runtime)
* @dev_attr: arbitrary device attributes that can be passed to the driver
* @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
* @_mpu_rt_va: cached register target start address (internal use)
* @_mpu_port: cached MPU register target slave (internal use)
* @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
u16 flags;
+ u8 mpu_rt_idx;
u8 response_lat;
u8 rst_lines_cnt;
u8 opt_clks_cnt;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b..56cebb0 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -532,7 +532,7 @@
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -552,7 +552,7 @@
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56..eb2f3b9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@
.clkdm_name = "cpsw_125mhz_clkdm",
.flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
.main_clk = "cpsw_125mhz_gclk",
+ .mpu_rt_idx = 1,
.prcm = {
.omap4 = {
.clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@
.name = "uart1",
.class = &uart_class,
.clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "dpll_per_m2_div4_wkupdm_ck",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2..0c3a427 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -509,7 +509,7 @@
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -528,7 +528,8 @@
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+ HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@
.mpu_irqs = uart4_mpu_irqs,
.sdma_reqs = uart4_sdma_reqs,
.main_clk = "uart4_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e6..9c3b504 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@
.name = "uart3",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
- HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@@ -2875,7 +2874,7 @@
.name = "uart4",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96..3c70f5c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1375,7 +1375,7 @@
.name = "uart3",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = DEBUG_OMAP4UART3_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@@ -1391,6 +1391,7 @@
.name = "uart4",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
+ .flags = DEBUG_OMAP4UART4_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de..a388f8c 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@
pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
uart_name, uart->num);
}
-
- /*
- * omap-uart can be used for earlyprintk logs
- * So if omap-uart is used as console then prevent
- * uart reset and idle to get logs from omap-uart
- * until uart console driver is available to take
- * care for console messages.
- * Idling or resetting omap-uart while printing logs
- * early boot logs can stall the boot-up.
- */
- oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
}
} while (1);
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de27..bc89723 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@
};
static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
+
/* .clock is set dynamically */
.config = &musb_config,
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f67..c5be60d 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1162,9 +1162,6 @@
gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
- /* Touchscreen */
- gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
/* GETHER */
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d555464..3354a85 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -167,7 +167,13 @@
"usb1", "usb1"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
- "sdhi0", "sdhi0"),
+ "sdhi0_data4", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_ctrl", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_cd", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_wp", "sdhi0"),
};
#define FPGA 0x18200000
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d..8d6bd5c 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -59,7 +59,7 @@
#define GPIO_KEY(c, g, d, ...) \
{ .code = c, .gpio = g, .desc = d, .active_low = 1 }
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc75..4c09bae 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __INIT
-
/*
* ST specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5c2ca..cd2c88e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -809,15 +809,18 @@
the CPU type fitted to the system. This permits binaries to be
run on ARMv4 through to ARMv7 without modification.
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
However, the fixed address nature of these helpers can be used
by ROP (return orientated programming) authors when creating
exploits.
If all of the binaries and libraries which run on your platform
are built specifically for your platform, and make no use of
- these helpers, then you can turn this option off. However,
- when such an binary or library is run, it will receive a SIGILL
- signal, which will terminate the program.
+ these helpers, then you can turn this option off to hinder
+ such exploits. However, in that case, if a binary or library
+ relying on those helpers is run, it will receive a SIGILL signal,
+ which will terminate the program.
Say N here only if you are absolutely certain that you do not
need these helpers; otherwise, the safe option is to say Y.
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c461..50a3ea0 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@
printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
- if (cpu->map_io == NULL || cpu->init == NULL) {
+ if (cpu->init == NULL) {
printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
panic("Unsupported Samsung CPU");
}
- cpu->map_io();
+ if (cpu->map_io)
+ cpu->map_io();
}
/* s3c24xx_init_clocks
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 79b317a..83e4f95 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -172,6 +172,7 @@
per_cpu(xen_vcpu, cpu) = vcpup;
enable_percpu_irq(xen_events_irq, 0);
+ put_cpu();
}
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de41..b25763b 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
#define TPIDR_EL1 18 /* Thread ID, Privileged */
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
+#define PAR_EL1 21 /* Physical Address Register */
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 21 /* Domain Access Control Register */
-#define IFSR32_EL2 22 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
-#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 27
+#define DACR32_EL2 22 /* Domain Access Control Register */
+#define IFSR32_EL2 23 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
+#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 28
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d739..0859a4d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@
struct kvm_mmu_memory_cache mmu_page_cache;
/* Target CPU and feature flags */
- u32 target;
+ int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb..717031a 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -97,10 +98,12 @@
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c4..12e6ccb 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -317,6 +322,9 @@
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3..1ac0bbb 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@
mrs x21, tpidr_el1
mrs x22, amair_el1
mrs x23, cntkctl_el1
+ mrs x24, par_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
@@ -225,6 +226,7 @@
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
+ str x24, [x3, #160]
.endm
.macro restore_sysregs
@@ -243,6 +245,7 @@
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
+ ldr x24, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
@@ -264,6 +267,7 @@
msr tpidr_el1, x21
msr amair_el1, x22
msr cntkctl_el1, x23
+ msr par_el1, x24
.endm
.macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
+ dsb ishst
+
kern_hyp_va x0
ldr x2, [x0, #KVM_VTTBR]
msr vttbr_el2, x2
@@ -621,6 +627,7 @@
ENDPROC(__kvm_tlb_flush_vmid_ipa)
ENTRY(__kvm_flush_vm_context)
+ dsb ishst
tlbi alle1is
ic ialluis
dsb sy
@@ -753,6 +760,10 @@
*/
tbnz x1, #7, 1f // S1PTW is set
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ push x3, xzr
+
/*
* Permission fault, HPFAR_EL2 is invalid.
* Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@
/* Read result */
mrs x3, par_el1
+ pop x0, xzr // Restore PAR_EL1 from the stack
+ msr par_el1, x0
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 9492360..02e9d09 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@
/* FAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, FAR_EL1 },
+ /* PAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ NULL, reset_unknown, PAR_EL1 },
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f914319..7de083d 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -150,7 +150,6 @@
static struct platform_device rmt_ts_device = {
.name = "ucb1400_ts",
.id = -1,
- }
};
#endif
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a9792..77d442a 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@
endmenu
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
source "drivers/Kconfig"
source "fs/Kconfig"
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de..bc5efc7 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
+ unsigned long start, end;
unsigned long start_addr;
unsigned long end_addr;
struct page **pages;
@@ -155,13 +156,15 @@
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
- tlb->fullmm = full_mm_flush;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->start_addr = ~0UL;
}
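For illustration, a minimal user-space sketch (not part of the patch) of why !(start | (end+1)) identifies a full-mm flush: the full-flush callers pass start == 0 and end == ~0UL, so the expression is non-zero for any bounded range. Names below are illustrative only.
#include <assert.h>
/* Mirrors the fullmm predicate introduced above. */
static int is_full_mm_flush(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));	/* true only for start == 0, end == ~0UL */
}
int main(void)
{
	assert(is_full_mm_flush(0, ~0UL));		/* whole address space */
	assert(!is_full_mm_flush(0x1000, 0x2000));	/* bounded unmap */
	return 0;
}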
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d..fa277ae 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
#include <asm/machdep.h>
#include <asm/natfeat.h>
+extern long nf_get_id2(const char *feature_name);
+
asm("\n"
-" .global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+" .global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
" .short 0x7300\n"
" rts\n"
"nf_call:\n"
@@ -29,12 +31,25 @@
"1: moveq.l #0,%d0\n"
" rts\n"
" .section __ex_table,\"a\"\n"
-" .long nf_get_id,1b\n"
+" .long nf_get_id2,1b\n"
" .long nf_call,1b\n"
" .previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
EXPORT_SYMBOL_GPL(nf_call);
+long nf_get_id(const char *feature_name)
+{
+ /* feature_name may be in vmalloc()ed memory, so make a copy */
+ char name_copy[32];
+ size_t n;
+
+ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+ if (n >= sizeof(name_copy))
+ return 0;
+
+ return nf_get_id2(name_copy);
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
void nfprint(const char *fmt, ...)
{
static char buf[256];
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a..ef881cf 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
+ unsigned long __base = (base); \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
- : "=d" (__n.n32[0]), "=d" (__upper) \
- : "d" (base), "0" (__n.n32[0])); \
+ : "=d" (__n.n32[0]), "=d" (__upper) \
+ : "d" (__base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
- : "=d" (__n.n32[1]), "=d" (__rem) \
- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
+ : "=d" (__n.n32[1]), "=d" (__rem) \
+ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
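A hedged sketch of what caching the divisor in __base buys: do_div() expects a 32-bit divisor, and unsigned long is 32 bits on m68k, so the local both truncates an over-wide argument to what divu.l/divul.l can take and evaluates the macro argument exactly once. The snippet below only models the truncation and is not kernel code.
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	uint64_t divisor = 0x100000003ULL;	/* hypothetical over-wide macro argument */
	uint32_t base32 = (uint32_t)divisor;	/* what a 32-bit __base ends up holding */
	printf("truncated divisor: %u\n", base32);	/* prints 3 */
	return 0;
}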
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ec..4fab522 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@
select GENERIC_CLOCKEVENTS
select GENERIC_IDLE_POLL_SETUP
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
+ select CLONE_BACKWARDS3
config SWAP
def_bool n
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1dc0860..fa44f3e 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -17,6 +17,8 @@
#define current_cpu_type() current_cpu_data.cputype
#endif
+#define boot_cpu_type() cpu_data[0].cputype
+
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 159abc8..126da74 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -66,6 +66,8 @@
int i, cpu = 1, boot_cpu = 0;
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ int cpu_hw_intr;
+
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
@@ -80,8 +82,12 @@
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
*/
- change_c0_brcm_cmt_intr(0xf8018000,
- (0x02 << 27) | (0x03 << 15));
+ if (boot_cpu == 0)
+ cpu_hw_intr = 0x02;
+ else
+ cpu_hw_intr = 0x1d;
+
+ change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659..46048d2 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@
dec_insn.next_pc_inc;
return 1;
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+#endif
case cop0_op:
case cop1_op:
case cop2_op:
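A hedged C sketch (not kernel code) of the branch-target rule the new Octeon bbit cases implement: a taken branch continues at the delay-slot successor plus the sign-extended 16-bit offset shifted left by two, otherwise execution resumes after the delay slot. Function and parameter names are illustrative.
#include <stdint.h>
static uint64_t bbit_next_pc(uint64_t epc, int16_t simm, uint64_t rs_val,
			     unsigned int bit, int branch_if_set)
{
	int set = (rs_val >> bit) & 1;
	if (set == !!branch_if_set)
		return epc + 4 + ((int64_t)simm << 2);	/* branch taken */
	return epc + 8;					/* step over the delay slot */
}
int main(void)
{
	/* bbit1 on bit 0 of a value with bit 0 set, offset -4 instructions */
	return bbit_next_pc(0x1000, -4, 1, 0, 1) == 0x1000 + 4 - 16 ? 0 : 1;
}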
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140..3a2b6e9 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -166,7 +166,7 @@
reg.control[i] |= M_PERFCTL_USER;
if (ctr[i].exl)
reg.control[i] |= M_PERFCTL_EXL;
- if (current_cpu_type() == CPU_XLR)
+ if (boot_cpu_type() == CPU_XLR)
reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
reg.counter[i] = 0x80000000 - ctr[i].count;
}
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index d22dc0d..2b7e837 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -206,11 +206,13 @@
.end = PNX8335_IP3902_PORTS_END,
.flags = IORESOURCE_MEM,
},
+#ifdef CONFIG_SOC_PNX8335
[1] = {
.start = PNX8335_PIC_ETHERNET_INT,
.end = PNX8335_PIC_ETHERNET_INT,
.flags = IORESOURCE_IRQ,
},
+#endif
};
static struct platform_device pnx833x_ethernet_device = {
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1..d60bf98 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
menu "Processor type and features"
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bf72cd..dbd9d3c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -566,7 +566,7 @@
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
- default "n"
+ default "y" if PPC_POWERNV
---help---
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 47a35b0..e378ccc 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@
unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+
/*
* Transactional FP and VSX 0-31 register set.
* NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6840e4..99222e2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
-#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
-#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
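A small worked check of the new bit-number scheme, assuming __MASK(x) expands to (1UL << (x)) as elsewhere in this header (an assumption, flagged in the code): the *_LG numbers reproduce the old IBM-style (63 - n) masks.
#include <assert.h>
#define __MASK(X)	(1UL << (X))	/* assumed to match the kernel's definition */
#define FSCR_TAR_LG	8
#define FSCR_DSCR_LG	2
int main(void)
{
	assert(__MASK(FSCR_TAR_LG)  == (1UL << (63 - 55)));	/* old FSCR_TAR */
	assert(__MASK(FSCR_DSCR_LG) == (1UL << (63 - 61)));	/* old FSCR_DSCR */
	return 0;
}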
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 49a13e0..294c2ce 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c7e8afc..8207459 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -138,6 +138,9 @@
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+ DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+ DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index ea9414c8..55593ee 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1061,7 +1061,7 @@
static int __init eeh_init_proc(void)
{
- if (machine_is(pseries))
+ if (machine_is(pseries) || machine_is(powernv))
proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
return 0;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ab15b8d..2bd0b88 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
- /*
- * Back up the TAR across context switches. Note that the TAR is not
- * available for use in the kernel. (To provide this, the TAR should
- * be backed up/restored on exception entry/exit instead, and be in
- * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
- */
- mfspr r0,SPRN_TAR
- std r0,THREAD_TAR(r3)
-
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
+ li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
-1: cmpd r0,r25
+ b 3f
+1:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ or r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ or r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ b 4f
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ andc r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ andc r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:
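A hedged C-level paraphrase (not the actual assembly, using stand-in variables rather than real SPR accessors) of what the FSCR/HFSCR block above does on context switch: if the incoming thread has been using the DSCR, the facility bit is set so user accesses do not trap; otherwise it is cleared so the next access faults into facility_unavailable_exception().
#define FSCR_DSCR	(1UL << 2)
#define HFSCR_DSCR	(1UL << 2)
static unsigned long fake_fscr, fake_hfscr;	/* stand-ins for the real SPRs */
static void switch_dscr_facility(int thread_used_dscr, int cpu_is_hv)
{
	if (thread_used_dscr)
		fake_fscr |= FSCR_DSCR;
	else
		fake_fscr &= ~FSCR_DSCR;
	if (cpu_is_hv) {	/* the hypervisor copy is updated too in HV mode */
		if (thread_used_dscr)
			fake_hfscr |= HFSCR_DSCR;
		else
			fake_hfscr &= ~HFSCR_DSCR;
	}
}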
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4e00d22..902ca3c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -848,7 +848,7 @@
. = 0x4f80
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b facility_unavailable_relon_hv
+ b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@
b .ret_from_except
STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
@@ -1188,7 +1189,7 @@
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
- STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c517dbe..8083be2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@
struct ppc64_tlb_batch *batch;
#endif
+ /* Back up the TAR across context switches.
+ * Note that the TAR is not available for use in the kernel. (To
+ * provide this, the TAR should be backed up/restored on exception
+ * entry/exit instead, and be in pt_regs. FIXME, this should be in
+ * pt_regs anyway (for debug).)
+ * Save the TAR here before we do treclaim/trecheckpoint as these
+ * will change the TAR.
+ */
+ save_tar(&prev->thread);
+
__switch_to_tm(prev);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 51be8fb..0554d1f 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -233,6 +233,16 @@
std r5, _CCR(r7)
std r6, _XER(r7)
+
+ /* ******************** TAR, PPR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+ mfspr r4, SPRN_PPR
+ mfspr r5, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+ std r4, THREAD_TM_PPR(r12)
+ std r5, THREAD_TM_DSCR(r12)
+
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@@ -347,6 +357,16 @@
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
+
+ /* ******************** TAR, PPR, DSCR ********** */
+ ld r4, THREAD_TM_TAR(r3)
+ ld r5, THREAD_TM_PPR(r3)
+ ld r6, THREAD_TM_DSCR(r3)
+
+ mtspr SPRN_TAR, r4
+ mtspr SPRN_PPR, r5
+ mtspr SPRN_DSCR, r6
+
/* ******************** CR,LR,CCR,MSR ********** */
ld r3, _CTR(r7)
ld r4, _LINK(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf33c22..e435bc0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
-#ifdef CONFIG_PPC32
#include <asm/reg.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@@ -1296,43 +1294,54 @@
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
+#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
static char *facility_strings[] = {
- "FPU",
- "VMX/VSX",
- "DSCR",
- "PMU SPRs",
- "BHRB",
- "TM",
- "AT",
- "EBB",
- "TAR",
+ [FSCR_FP_LG] = "FPU",
+ [FSCR_VECVSX_LG] = "VMX/VSX",
+ [FSCR_DSCR_LG] = "DSCR",
+ [FSCR_PM_LG] = "PMU SPRs",
+ [FSCR_BHRB_LG] = "BHRB",
+ [FSCR_TM_LG] = "TM",
+ [FSCR_EBB_LG] = "EBB",
+ [FSCR_TAR_LG] = "TAR",
};
- char *facility, *prefix;
+ char *facility = "unknown";
u64 value;
+ u8 status;
+ bool hv;
- if (regs->trap == 0xf60) {
- value = mfspr(SPRN_FSCR);
- prefix = "";
- } else {
+ hv = (regs->trap == 0xf80);
+ if (hv)
value = mfspr(SPRN_HFSCR);
- prefix = "Hypervisor ";
+ else
+ value = mfspr(SPRN_FSCR);
+
+ status = value >> 56;
+ if (status == FSCR_DSCR_LG) {
+ /* User is accessing the DSCR. Set the inherit bit and allow
+ * the user to set it directly in future via the
+ * H/FSCR DSCR bit.
+ */
+ current->thread.dscr_inherit = 1;
+ if (hv)
+ mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+ else
+ mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ return;
}
- value = value >> 56;
+ if ((status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (value < ARRAY_SIZE(facility_strings))
- facility = facility_strings[value];
- else
- facility = "unknown";
-
pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
- prefix, facility, regs->nip, regs->msr);
+ hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dd..7629cd3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1809,7 +1809,7 @@
rma_size <<= PAGE_SHIFT;
rmls = lpcr_rmls(rma_size);
err = -EINVAL;
- if (rmls < 0) {
+ if ((long)rmls < 0) {
pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
goto out_srcu;
}
@@ -1874,7 +1874,7 @@
/* Allocate the guest's logical partition ID */
lpid = kvmppc_alloc_lpid();
- if (lpid < 0)
+ if ((long)lpid < 0)
return -ENOMEM;
kvm->arch.lpid = lpid;
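A tiny sketch of why the (long) casts above are needed: the values being tested are held in unsigned variables, so a plain `< 0` test on a stored negative error is always false; casting to long restores the sign check.
#include <assert.h>
int main(void)
{
	unsigned long rmls = (unsigned long)-5;	/* a negative error stored in an unsigned variable */
	assert((long)rmls < 0);			/* the cast makes the error detectable again */
	return 0;
}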
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19498a5..c6e13d9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1047,11 +1047,12 @@
if (err)
goto free_shadow_vcpu;
+ err = -ENOMEM;
p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- /* the real shared page fills the last 4k of our page */
- vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
if (!p)
goto uninit_vcpu;
+ /* the real shared page fills the last 4k of our page */
+ vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8671a..6a5f2b1 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -569,35 +569,6 @@
return ret;
}
-static int unzip_oops(char *oops_buf, char *big_buf)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- u64 timestamp = oops_hdr->timestamp;
- char *big_oops_data = NULL;
- char *oops_data_buf = NULL;
- size_t big_oops_data_sz;
- int unzipped_len;
-
- big_oops_data = big_buf + sizeof(struct oops_log_info);
- big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
- oops_data_buf = oops_buf + sizeof(struct oops_log_info);
-
- unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
- oops_hdr->report_length,
- big_oops_data_sz);
-
- if (unzipped_len < 0) {
- pr_err("nvram: decompression failed; returned %d\n",
- unzipped_len);
- return -1;
- }
- oops_hdr = (struct oops_log_info *)big_buf;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) unzipped_len;
- oops_hdr->timestamp = timestamp;
- return 0;
-}
-
static int nvram_pstore_open(struct pstore_info *psi)
{
/* Reset the iterator to start reading partitions again */
@@ -685,10 +656,9 @@
unsigned int err_type, id_no, size = 0;
struct nvram_os_partition *part = NULL;
char *buff = NULL, *big_buff = NULL;
- int rc, sig = 0;
+ int sig = 0;
loff_t p;
-read_partition:
read_type++;
switch (nvram_type_ids[read_type]) {
@@ -749,30 +719,46 @@
*id = id_no;
if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+ int length, unzipped_len;
+ size_t hdr_size;
+
oops_hdr = (struct oops_log_info *)buff;
- *buf = buff + sizeof(*oops_hdr);
+ if (oops_hdr->version < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = oops_hdr->version;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = oops_hdr->report_length;
+ time->tv_sec = oops_hdr->timestamp;
+ time->tv_nsec = 0;
+ }
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (!big_buff)
return -ENOMEM;
- rc = unzip_oops(buff, big_buff);
+ unzipped_len = nvram_decompress(*buf, big_buff,
+ length, big_oops_buf_sz);
- if (rc != 0) {
- kfree(buff);
+ if (unzipped_len < 0) {
+ pr_err("nvram: decompression failed, returned "
+ "rc %d\n", unzipped_len);
kfree(big_buff);
- goto read_partition;
+ } else {
+ *buf = big_buff;
+ length = unzipped_len;
}
-
- oops_hdr = (struct oops_log_info *)big_buff;
- *buf = big_buff + sizeof(*oops_hdr);
- kfree(buff);
}
-
- time->tv_sec = oops_hdr->timestamp;
- time->tv_nsec = 0;
- return oops_hdr->report_length;
+ return length;
}
*buf = buff;
@@ -816,6 +802,7 @@
static void __init nvram_init_oops_partition(int rtas_partition_exists)
{
int rc;
+ size_t size;
rc = pseries_nvram_init_os_partition(&oops_log_partition);
if (rc != 0) {
@@ -844,8 +831,9 @@
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+ zlib_inflate_workspacesize());
+ stream.workspace = kmalloc(size, GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 22f75b5..8a4cae7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
@@ -227,11 +228,12 @@
not work on older machines.
config MARCH_ZEC12
- bool "IBM zEC12"
+ bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES if 64BIT
help
- Select this to enable optimizations for IBM zEC12 (2827 series). The
- kernel will be slightly faster but will not work on older machines.
+ Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
+ 2827 series). The kernel will be slightly faster but will not work on
+ older machines.
endchoice
@@ -709,6 +711,7 @@
def_bool y
prompt "s390 support for virtio devices"
depends on 64BIT
+ select TTY
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3ad8f61..866ecbe 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,9 +6,9 @@
BITS := $(if $(CONFIG_64BIT),64,31)
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
- vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
- sizes.h head$(BITS).o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += misc.o piggy.o sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZ4) := lz4
suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_XZ) := xz
@@ -56,6 +57,8 @@
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+ $(call if_changed,lz4)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index c4c6a1c..57cbaff 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -47,6 +47,10 @@
#include "../../../../lib/decompress_bunzip2.c"
#endif
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e..7d46767 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -693,7 +693,7 @@
size -= offset;
p = addr + offset / BITS_PER_LONG;
if (bit) {
- set = __flo_word(0, *p & (~0UL << bit));
+ set = __flo_word(0, *p & (~0UL >> bit));
if (set >= size)
return size + offset;
if (set < BITS_PER_LONG)
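A hedged illustration of the mask fix above, assuming the left-to-right (MSB-first) bit numbering these s390 helpers use: to skip the first `bit` bits of a word, the top bits must be cleared, which means shifting ~0UL right rather than left.
#include <assert.h>
int main(void)
{
	unsigned int bit = 3;
	unsigned long msb = 1UL << (sizeof(unsigned long) * 8 - 1);
	unsigned long fixed = ~0UL >> bit;	/* clears the 3 most significant bits (the fix) */
	unsigned long buggy = ~0UL << bit;	/* clears the 3 least significant bits instead */
	assert((fixed & msb) == 0);
	assert((buggy & msb) != 0);
	return 0;
}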
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d6..6d6d92b 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
+ unsigned long start, end;
};
struct mmu_table_batch {
@@ -48,10 +49,13 @@
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm,
- unsigned int full_mm_flush)
+ unsigned long start,
+ unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
if (tlb->fullmm)
__tlb_flush_mm(mm);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a6fc037..500aa10 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -52,12 +52,13 @@
static bool is_in_guest(struct pt_regs *regs)
{
- unsigned long ip = instruction_pointer(regs);
-
if (user_mode(regs))
return false;
-
- return ip == (unsigned long) &sie_exit;
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ return instruction_pointer(regs) == (unsigned long) &sie_exit;
+#else
+ return false;
+#endif
}
static unsigned long guest_is_user_mode(struct pt_regs *regs)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 497451e..aeed8a6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -994,6 +994,7 @@
strcpy(elf_platform, "z196");
break;
case 0x2827:
+ case 0x2828:
strcpy(elf_platform, "zEC12");
break;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2..34c1c9a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -702,14 +702,25 @@
return rc;
vcpu->arch.sie_block->icptcode = 0;
- preempt_disable();
- kvm_guest_enter();
- preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+ /*
+ * As PF_VCPU will be used in fault handler, between guest_enter
+ * and guest_exit should be no uaccess.
+ */
+ preempt_disable();
+ kvm_guest_enter();
+ preempt_enable();
rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+ kvm_guest_exit();
+
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+ trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
if (rc > 0)
rc = 0;
if (rc < 0) {
@@ -721,10 +732,6 @@
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
}
- VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
- vcpu->arch.sie_block->icptcode);
- trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
- kvm_guest_exit();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0da3e6e..4cdc54e 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
+#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
@@ -532,8 +533,7 @@
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Only provide non-quiescing support if the host supports it */
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
- S390_lowcore.stfl_fac_list & 0x00020000)
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* No support for conditional-SSKE */
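A small worked check of the facility-bit encoding behind this change, assuming the MSB-first numbering used by the stfl facility list: facility 14 corresponds to the hard-coded 0x00020000 mask that test_facility(14) replaces.
#include <assert.h>
#include <stdint.h>
int main(void)
{
	uint32_t facility14_mask = UINT32_C(0x80000000) >> 14;	/* MSB-first bit 14 */
	assert(facility14_mask == 0x00020000);
	return 0;
}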
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea8..ad446b0 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@
order = 2;
break;
case 0x2827: /* zEC12 */
+ case 0x2828: /* zEC12 */
default:
order = 5;
break;
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17c..930783d 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,7 +440,7 @@
switch (id.machine) {
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
- case 0x2827: ops->cpu_type = "s390/zEC12"; break;
+ case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
default: return -ENODEV;
}
}
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8b..5fc2375 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
config MMU
def_bool y
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d..362192e 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd..29b0301 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index d606463..b7388a4 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -225,7 +225,7 @@
unsigned long nr_pages;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table->boottime->free_pages, addr, size);
+ efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
}
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
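A short sketch of the unit fix above: the firmware free_pages service takes a page count, so the byte size has to be rounded up to whole EFI pages first. The 4 KiB page size below is an assumption matching x86.
#include <assert.h>
#define EFI_PAGE_SIZE	4096UL	/* assumed 4 KiB EFI page */
int main(void)
{
	unsigned long size = 5000;	/* hypothetical allocation size in bytes */
	unsigned long nr_pages = (size + EFI_PAGE_SIZE - 1) / EFI_PAGE_SIZE;
	assert(nr_pages == 2);		/* free_pages() wants 2, not 5000 */
	return 0;
}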
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d..4a8cb8d 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->olpc_ofw_header, 0,
+ memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
- (char *)&boot_params->olpc_ofw_header);
+ (char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 50e5c58..4c01917 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@
extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
#ifdef CONFIG_MICROCODE_AMD_EARLY
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489c..3bf2dd0 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS 28
+#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
+ & ((1U << PTE_FILE_BITS1) - 1))) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << (PTE_FILE_BITS1)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off) \
+ ((pte_t) { .pte_low = \
+ ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
+ + ((((off) >> PTE_FILE_BITS1) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << PTE_FILE_SHIFT2) \
+ + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << PTE_FILE_SHIFT3) \
+ + ((((off) >> \
+ (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
+ << PTE_FILE_SHIFT4) \
+ + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@
<< PTE_FILE_SHIFT3) \
+ _PAGE_FILE })
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b..81bb91b 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking, bit 11 is taken from
+ * the low part of the pte as well.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a..1c00631 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -314,6 +314,36 @@
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63..f4843e0 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
* they do not conflict with each other.
*/
+#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
+
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
+/*
+ * Tracking the soft dirty bit when a page goes to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ not conflict
+ * with the swap entry format. On x86 bits 6 and 7 are *not* involved
+ * in swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index d68883d..8963bfe 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -279,8 +279,4 @@
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ece..08a0890 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -512,7 +512,7 @@
static const int amd_erratum_383[];
static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
static void init_amd(struct cpuinfo_x86 *c)
{
@@ -729,11 +729,11 @@
value &= ~(1ULL << 24);
wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
- if (cpu_has_amd_erratum(amd_erratum_383))
+ if (cpu_has_amd_erratum(c, amd_erratum_383))
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
- if (cpu_has_amd_erratum(amd_erratum_400))
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@
static const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
- struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
int osvw_id = *erratum++;
u32 range;
u32 ms;
- /*
- * If called early enough that current_cpu_data hasn't been initialized
- * yet, fall back to boot_cpu_data.
- */
- if (cpu->x86 == 0)
- cpu = &boot_cpu_data;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return false;
-
if (osvw_id >= 0 && osvw_id < 65536 &&
cpu_has(cpu, X86_FEATURE_OSVW)) {
u64 osvw_len;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210..a45d8d4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2270,6 +2270,7 @@
case 70:
case 71:
case 63:
+ case 69:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791d..1fb6c72 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
- INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
- INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
{ /* end: all zeroes */ },
};
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b9..63bdb29 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@
static void __init intel_remapping_check(int num, int slot, int func)
{
u8 revision;
+ u16 device;
+ device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
/*
- * Revision 0x13 of this chipset supports irq remapping
- * but has an erratum that breaks its behavior, flag it as such
+ * Revision 0x13 of all triggering device ids in this quirk
+ * have a problem draining interrupts when irq remapping is
+ * enabled, and should be flagged as broken. Additionally,
+ * revisions 0x12 and 0x22 of device id 0x3405 have this problem.
*/
if (revision == 0x13)
set_irq_remapping_broken();
+ else if ((device == 0x3405) &&
+ ((revision == 0x12) ||
+ (revision == 0x22)))
+ set_irq_remapping_broken();
}
@@ -239,6 +247,8 @@
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+ { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
+ PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{}
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 202d24f..5d576ab 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,7 +116,7 @@
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
- asm volatile("fxsave %0" : : "m" (fx_scratch));
+ asm volatile("fxsave %0" : "+m" (fx_scratch));
mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 47ebb1d..7123b5d 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -145,10 +145,9 @@
return 0;
}
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
unsigned int size)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 max_size;
#define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458
- switch (c->x86) {
+ switch (family) {
case 0x14:
max_size = F14H_MPB_MAX_SIZE;
break;
@@ -220,12 +219,13 @@
return 0;
}
- if (__apply_microcode_amd(mc_amd))
+ if (__apply_microcode_amd(mc_amd)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
- else
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);
+ return -1;
+ }
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+ mc_amd->hdr.patch_id);
uci->cpu_sig.rev = mc_amd->hdr.patch_id;
c->microcode = mc_amd->hdr.patch_id;
@@ -276,9 +276,8 @@
* driver cannot continue functioning normally. In such cases, we tear
* down everything we've used up so far and exit.
*/
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_header_amd *mc_hdr;
struct ucode_patch *patch;
unsigned int patch_size, crnt_size, ret;
@@ -298,7 +297,7 @@
/* check if patch is for the current family */
proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
- if (proc_fam != c->x86)
+ if (proc_fam != family)
return crnt_size;
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -307,7 +306,7 @@
return crnt_size;
}
- ret = verify_patch_size(cpu, patch_size, leftover);
+ ret = verify_patch_size(family, patch_size, leftover);
if (!ret) {
pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
return crnt_size;
@@ -338,7 +337,8 @@
return crnt_size;
}
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ size_t size)
{
enum ucode_state ret = UCODE_ERROR;
unsigned int leftover;
@@ -361,7 +361,7 @@
}
while (leftover) {
- crnt_size = verify_and_add_patch(cpu, fw, leftover);
+ crnt_size = verify_and_add_patch(family, fw, leftover);
if (crnt_size < 0)
return ret;
@@ -372,22 +372,22 @@
return UCODE_OK;
}
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
- ret = __load_microcode_amd(cpu, data, size);
+ ret = __load_microcode_amd(family, data, size);
if (ret != UCODE_OK)
cleanup();
#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
/* save BSP's matching patch for early load */
- if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
- struct ucode_patch *p = find_patch(cpu);
+ if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+ struct ucode_patch *p = find_patch(smp_processor_id());
if (p) {
memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -440,7 +440,7 @@
goto fw_release;
}
- ret = load_microcode_amd(cpu, fw->data, fw->size);
+ ret = load_microcode_amd(c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1d14ffe..6073104 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -238,25 +238,17 @@
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
- struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u32 rev, eax;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
eax = cpuid_eax(0x00000001);
- uci->cpu_sig.sig = eax;
uci->cpu_sig.rev = rev;
- c->microcode = rev;
- c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
- unsigned int cpu = smp_processor_id();
-
- collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+ uci->cpu_sig.sig = eax;
if (cpu && !ucode_loaded) {
void *ucode;
@@ -265,8 +257,10 @@
return;
ucode = (void *)(initrd_start + ucode_offset);
- if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
return;
+
ucode_loaded = true;
}
@@ -278,6 +272,8 @@
{
enum ucode_state ret;
void *ucode;
+ u32 eax;
+
#ifdef CONFIG_X86_32
unsigned int bsp = boot_cpu_data.cpu_index;
struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@
return 0;
ucode = (void *)(initrd_start + ucode_offset);
- ret = load_microcode_amd(0, ucode, ucode_size);
+ eax = cpuid_eax(0x00000001);
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+ ret = load_microcode_amd(eax, ucode, ucode_size);
if (ret != UCODE_OK)
return -EINVAL;
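A hedged worked example of the family computation the early loader now performs itself: the AMD family is the CPUID(1).EAX base family plus the extended family field, exactly the ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) expression above. The sample EAX value is hypothetical.
#include <assert.h>
#include <stdint.h>
static uint8_t amd_family(uint32_t eax)
{
	return ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
}
int main(void)
{
	assert(amd_family(0x00600f20) == 0x15);	/* base family 0xf + extended family 0x6 */
	return 0;
}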
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5a..30277e2 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@
*begin = new_begin;
}
} else {
- *begin = TASK_UNMAPPED_BASE;
+ *begin = current->mm->mmap_legacy_base;
*end = TASK_SIZE;
}
}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5..25e7e13 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,11 +112,13 @@
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_base();
+
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_legacy_base();
+ mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 6243651..09f3059 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -322,6 +322,17 @@
e820_add_region(start, end - start, type);
}
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
@@ -362,6 +373,17 @@
}
BUG_ON(rc);
+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 368c290..9235842 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -700,8 +700,15 @@
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
- rc = native_cpu_up(cpu, tidle);
- WARN_ON (xen_smp_intr_init(cpu));
+ /*
+ * xen_smp_intr_init() needs to run before native_cpu_up()
+ * so that IPI vectors are set up on the booting CPU before
+ * it is marked online in native_cpu_up().
+ */
+ rc = xen_smp_intr_init(cpu);
+ WARN_ON(rc);
+ if (!rc)
+ rc = native_cpu_up(cpu, tidle);
return rc;
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index fd6c51c..5a74a9c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -451,7 +451,6 @@
/* Clean up. */
per_cpu(processor_device_array, pr->id) = NULL;
per_cpu(processors, pr->id) = NULL;
- try_offline_node(cpu_to_node(pr->id));
/* Remove the CPU. */
get_online_cpus();
@@ -459,6 +458,8 @@
acpi_unmap_lsapic(pr->id);
put_online_cpus();
+ try_offline_node(cpu_to_node(pr->id));
+
out:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f680957..408f6b2 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -31,6 +31,7 @@
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
+#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
@@ -78,41 +79,108 @@
return ret;
}
-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
- void *addr_p, void **ret_p)
+static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
{
- unsigned long long addr, sta;
- acpi_status status;
+ struct acpi_device *adev = NULL;
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
- if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+ acpi_bus_get_device(handle, &adev);
+ if (adev) {
*ret_p = handle;
- status = acpi_bus_get_status_handle(handle, &sta);
- if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
- return AE_CTRL_TERMINATE;
+ return AE_CTRL_TERMINATE;
}
return AE_OK;
}
-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
+static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
{
- void *ret = NULL;
+ unsigned long long sta;
+ acpi_status status;
- if (!parent)
- return NULL;
+ status = acpi_bus_get_status_handle(handle, &sta);
+ if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+ return false;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
- do_acpi_find_child, &address, &ret);
- return (acpi_handle)ret;
+ if (is_bridge) {
+ void *test = NULL;
+
+ /* Check if this object has at least one child device. */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_dev_present, NULL, NULL, &test);
+ return !!test;
+ }
+ return true;
}
-EXPORT_SYMBOL(acpi_get_child);
+
+struct find_child_context {
+ u64 addr;
+ bool is_bridge;
+ acpi_handle ret;
+ bool ret_checked;
+};
+
+static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
+ void *data, void **not_used)
+{
+ struct find_child_context *context = data;
+ unsigned long long addr;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+ if (ACPI_FAILURE(status) || addr != context->addr)
+ return AE_OK;
+
+ if (!context->ret) {
+ /* This is the first matching object. Save its handle. */
+ context->ret = handle;
+ return AE_OK;
+ }
+ /*
+ * There is more than one matching object with the same _ADR value.
+ * That really is unexpected, so we are kind of beyond the scope of the
+ * spec here. We have to choose which one to return, though.
+ *
+ * First, check if the previously found object is good enough and return
+ * its handle if so. Second, check the same for the object that we've
+ * just found.
+ */
+ if (!context->ret_checked) {
+ if (acpi_extra_checks_passed(context->ret, context->is_bridge))
+ return AE_CTRL_TERMINATE;
+ else
+ context->ret_checked = true;
+ }
+ if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+ context->ret = handle;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
+}
+
+acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+{
+ if (parent) {
+ struct find_child_context context = {
+ .addr = addr,
+ .is_bridge = is_bridge,
+ };
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
+ NULL, &context, NULL);
+ return context.ret;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_find_child);
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
- char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
+ struct list_head *physnode_list;
+ unsigned int node_id;
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@
mutex_lock(&acpi_dev->physical_node_lock);
- /* Sanity check. */
- list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+ /*
+ * Keep the list sorted by node_id so that the IDs of removed nodes can
+ * be recycled easily.
+ */
+ physnode_list = &acpi_dev->physical_node_list;
+ node_id = 0;
+ list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
+ /* Sanity check. */
if (pn->dev == dev) {
dev_warn(dev, "Already associated with ACPI node\n");
goto err_free;
}
-
- /* allocate physical node id according to physical_node_id_bitmap */
- physical_node->node_id =
- find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
- ACPI_MAX_PHYSICAL_NODE);
- if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
- retval = -ENOSPC;
- goto err_free;
+ if (pn->node_id == node_id) {
+ physnode_list = &pn->node;
+ node_id++;
+ }
}
- set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+ physical_node->node_id = node_id;
physical_node->dev = dev;
- list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+ list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
- char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
entry = list_entry(node, struct acpi_device_physical_node,
node);
@@ -216,7 +286,6 @@
continue;
list_del(node);
- clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
acpi_dev->physical_node_count--;
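A hedged sketch of the id allocation the sorted list above enables: walking the list in node_id order, the first gap is the id to hand out, so ids freed by unbound devices get recycled without a bitmap. The array-based helper below is illustrative only.
#include <stdio.h>
static unsigned int first_free_id(const unsigned int *sorted_ids, int n)
{
	unsigned int id = 0;
	int i;
	for (i = 0; i < n; i++)
		if (sorted_ids[i] == id)
			id++;		/* id in use, try the next one */
	return id;
}
int main(void)
{
	unsigned int ids[] = { 0, 1, 3 };	/* id 2 was freed earlier */
	printf("next id: %u\n", first_free_id(ids, 3));	/* prints 2 */
	return 0;
}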
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a..04a1378 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
+ mutex_lock(&dev->physical_node_lock);
+
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@
put_device(ldev);
}
}
+
+ mutex_unlock(&dev->physical_node_lock);
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -347,12 +351,16 @@
{
struct acpi_device_physical_node *entry;
+ mutex_lock(&adev->physical_node_lock);
+
list_for_each_entry(entry,
&adev->physical_node_list, node)
if (entry->dev && device_can_wakeup(entry->dev)) {
bool enable = !device_may_wakeup(entry->dev);
device_set_wakeup_enable(entry->dev, enable);
}
+
+ mutex_unlock(&adev->physical_node_lock);
}
static ssize_t
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 0ec434d..3270d3c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -689,7 +689,7 @@
* Some systems always report current brightness level as maximum
* through _BQC, we need to test another value for them.
*/
- test_level = current_level == max_level ? br->levels[2] : max_level;
+ test_level = current_level == max_level ? br->levels[3] : max_level;
result = acpi_video_device_lcd_set_level(device, test_level);
if (result)
@@ -908,9 +908,6 @@
device->cap._DDC = 1;
}
- if (acpi_video_init_brightness(device))
- return;
-
if (acpi_video_backlight_support()) {
struct backlight_properties props;
struct pci_dev *pdev;
@@ -920,6 +917,9 @@
static int count = 0;
char *name;
+ result = acpi_video_init_brightness(device);
+ if (result)
+ return;
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
@@ -979,11 +979,6 @@
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
- } else {
- /* Remove the brightness object. */
- kfree(device->brightness->levels);
- kfree(device->brightness);
- device->brightness = NULL;
}
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722..20fd337 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@
/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
- * host controller, 3726 PMP will very rarely drop a deferred
+ * host controller, 3x26 PMP will very rarely drop a deferred
* R_OK that was intended for the host. Symptom will be all
* 5 drives under test will timeout, get reset, and recover.
*/
- if (vendor == 0x1095 && devid == 0x3726) {
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
u32 reg;
err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to read Sil3726 Private Register";
+ reason = "failed to read Sil3x26 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to write Sil3726 Private Register";
+ reason = "failed to write Sil3x26 Private Register";
goto fail;
}
}
@@ -383,8 +383,8 @@
u16 devid = sata_pmp_gscr_devid(gscr);
struct ata_link *link;
- if (vendor == 0x1095 && devid == 0x3726) {
- /* sil3726 quirks */
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ /* sil3x26 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4ec7c04..26386f0 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -237,6 +237,7 @@
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0..851bd3f 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@
{
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
+ unsigned long flags;
if (count > ICC_MAX_INT_COUNT_THRESHOLD)
count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@
(count > ICC_MIN_INT_COUNT_THRESHOLD))
ticks = ICC_SAFE_INT_TICKS;
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
iowrite32((count << 24 | ticks), hcr_base + ICC);
intr_coalescing_count = count;
intr_coalescing_ticks = ticks;
- spin_unlock(&host->lock);
+ spin_unlock_irqrestore(&host->lock, flags);
DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
intr_coalescing_count, intr_coalescing_ticks);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92..e9a4f46 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@
#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
-#define ECX_ACTIVITY_SHIFT 2
+#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
-#define ECX_FAULT_SHIFT 0
+#define ECX_FAULT_SHIFT 2
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
u32 shift)
{
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e691026..3455f83 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -719,7 +719,8 @@
}
}
- return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+ map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944..4d45dba 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@
int i;
bio_for_each_segment(bv, bio, i) {
- page = bv->bv_page;
/* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel,
- * but this has never been seen here.
+ * compound pages is no longer allowed by the kernel.
*/
- if (unlikely(PageCompound(page)))
- if (compound_trans_head(page) != page) {
- pr_crit("page tail used for block I/O\n");
- BUG();
- }
+ page = compound_trans_head(bv->bv_page);
atomic_inc(&page->_count);
}
}
@@ -924,10 +918,13 @@
bio_pagedec(struct bio *bio)
{
struct bio_vec *bv;
+ struct page *page;
int i;
- bio_for_each_segment(bv, bio, i)
- atomic_dec(&bv->bv_page->_count);
+ bio_for_each_segment(bv, bio, i) {
+ page = compound_trans_head(bv->bv_page);
+ atomic_dec(&page->_count);
+ }
}
static void
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1b456fe..fc45567 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -272,9 +272,12 @@
unsigned long flags;
spin_lock_irqsave(&portdev->ports_lock, flags);
- list_for_each_entry(port, &portdev->ports, list)
- if (port->cdev->dev == dev)
+ list_for_each_entry(port, &portdev->ports, list) {
+ if (port->cdev->dev == dev) {
+ kref_get(&port->kref);
goto out;
+ }
+ }
port = NULL;
out:
spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -746,6 +749,10 @@
port = filp->private_data;
+ /* Port is hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
if (!port_has_data(port)) {
/*
* If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@
if (ret < 0)
return ret;
}
- /* Port got hot-unplugged. */
+ /* Port got hot-unplugged while we were waiting above. */
if (!port->guest_connected)
return -ENODEV;
/*
@@ -932,13 +939,25 @@
if (is_rproc_serial(port->out_vq->vdev))
return -EINVAL;
+ /*
+	 * pipe->nrbufs == 0 means there is no data to transfer,
+	 * so just return 0 in that case.
+ */
+ pipe_lock(pipe);
+ if (!pipe->nrbufs) {
+ ret = 0;
+ goto error_out;
+ }
+
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
- return ret;
+ goto error_out;
buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
sgl.n = 0;
sgl.len = 0;
@@ -946,12 +965,17 @@
sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+ pipe_unlock(pipe);
if (likely(ret > 0))
ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
if (unlikely(ret <= 0))
free_buf(buf, true);
return ret;
+
+error_out:
+ pipe_unlock(pipe);
+ return ret;
}
static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@
struct port *port;
int ret;
+ /* We get the port with a kref here */
port = find_port_by_devt(cdev->dev);
+ if (!port) {
+ /* Port was unplugged before we could proceed */
+ return -ENXIO;
+ }
filp->private_data = port;
- /* Prevent against a port getting hot-unplugged at the same time */
- spin_lock_irq(&port->portdev->ports_lock);
- kref_get(&port->kref);
- spin_unlock_irq(&port->portdev->ports_lock);
-
/*
* Don't allow opening of console port devices -- that's done
* via /dev/hvc
@@ -1498,14 +1522,6 @@
port = container_of(kref, struct port, kref);
- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
- device_destroy(pdrvdata.class, port->dev->devt);
- cdev_del(port->cdev);
-
- kfree(port->name);
-
- debugfs_remove(port->debugfs_file);
-
kfree(port);
}
@@ -1539,12 +1555,14 @@
spin_unlock_irq(&port->portdev->ports_lock);
if (port->guest_connected) {
- port->guest_connected = false;
- port->host_connected = false;
- wake_up_interruptible(&port->waitqueue);
-
/* Let the app know the port is going down. */
send_sigio_to_port(port);
+
+ /* Do this after sigio is actually sent */
+ port->guest_connected = false;
+ port->host_connected = false;
+
+ wake_up_interruptible(&port->waitqueue);
}
if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@
*/
port->portdev = NULL;
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
/*
* Locks around here are not necessary - a port can't be
* opened after we removed the port struct from ports_list
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1bdb882..4e57397 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -581,11 +581,15 @@
DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
- DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
- DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+ DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+ CLK_GET_RATE_NOCACHE, 0),
+ DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+ CLK_GET_RATE_NOCACHE, 0),
DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
- DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
- DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
+ DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+ 4, 3, CLK_GET_RATE_NOCACHE, 0),
+ DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+ 8, 3, CLK_GET_RATE_NOCACHE, 0),
DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
};
@@ -863,57 +867,57 @@
GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
};
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 5c205b6..089d3e3 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -71,6 +71,7 @@
static DEFINE_SPINLOCK(ddrpll_lock);
static DEFINE_SPINLOCK(iopll_lock);
static DEFINE_SPINLOCK(armclk_lock);
+static DEFINE_SPINLOCK(swdtclk_lock);
static DEFINE_SPINLOCK(ddrclk_lock);
static DEFINE_SPINLOCK(dciclk_lock);
static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@
}
clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock);
+ SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
/* DDR clocks */
clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
- clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0,
- SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock);
+ clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+ CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+ &gem0clk_lock);
clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
"gem0_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
- clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0,
- SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock);
+ clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+ CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+ &gem1clk_lock);
clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
"gem1_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2ef..f97cb3d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -221,8 +221,8 @@
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
@@ -235,10 +235,10 @@
if (input > 1)
input = 1;
- if (input == cs_tuners->ignore_nice) /* nothing to do */
+ if (input == cs_tuners->ignore_nice_load) /* nothing to do */
return count;
- cs_tuners->ignore_nice = input;
+ cs_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -246,7 +246,7 @@
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, 0);
- if (cs_tuners->ignore_nice)
+ if (cs_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
@@ -279,7 +279,7 @@
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice);
+show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);
@@ -287,7 +287,7 @@
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -297,7 +297,7 @@
&sampling_down_factor_gov_sys.attr,
&up_threshold_gov_sys.attr,
&down_threshold_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&freq_step_gov_sys.attr,
NULL
};
@@ -313,7 +313,7 @@
&sampling_down_factor_gov_pol.attr,
&up_threshold_gov_pol.attr,
&down_threshold_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&freq_step_gov_pol.attr,
NULL
};
@@ -338,7 +338,7 @@
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->tuners = tuners;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7b839a8..e59afaa 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -47,9 +47,9 @@
unsigned int j;
if (dbs_data->cdata->governor == GOV_ONDEMAND)
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
else
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
policy = cdbs->cur_policy;
@@ -298,12 +298,12 @@
cs_tuners = dbs_data->tuners;
cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
} else {
od_tuners = dbs_data->tuners;
od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = od_tuners->sampling_rate;
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
od_ops = dbs_data->cdata->gov_ops;
io_busy = od_tuners->io_is_busy;
}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 6663ec3..d5f12b4 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -165,7 +165,7 @@
/* Per policy Governers sysfs tunables */
struct od_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
@@ -175,7 +175,7 @@
};
struct cs_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cb..c087347 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -403,8 +403,8 @@
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
@@ -419,10 +419,10 @@
if (input > 1)
input = 1;
- if (input == od_tuners->ignore_nice) { /* nothing to do */
+ if (input == od_tuners->ignore_nice_load) { /* nothing to do */
return count;
}
- od_tuners->ignore_nice = input;
+ od_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -430,7 +430,7 @@
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- if (od_tuners->ignore_nice)
+ if (od_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -461,7 +461,7 @@
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice);
+show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);
@@ -469,7 +469,7 @@
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -478,7 +478,7 @@
&sampling_rate_gov_sys.attr,
&up_threshold_gov_sys.attr,
&sampling_down_factor_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&powersave_bias_gov_sys.attr,
&io_is_busy_gov_sys.attr,
NULL
@@ -494,7 +494,7 @@
&sampling_rate_gov_pol.attr,
&up_threshold_gov_pol.attr,
&sampling_down_factor_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&powersave_bias_gov_pol.attr,
&io_is_busy_gov_pol.attr,
NULL
@@ -544,7 +544,7 @@
}
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
tuners->io_is_busy = should_io_be_busy();
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index bb838b9..9536852c 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@
clk_put(cpuclk);
return -EINVAL;
}
- ret = clk_set_rate(cpuclk, rate);
- if (ret) {
- clk_put(cpuclk);
- return ret;
- }
/* clock table init */
for (i = 2;
@@ -130,6 +125,12 @@
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
+
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b67f45f..5039fbc 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -400,8 +400,8 @@
shdma_chan);
struct sh_dmae_desc *sh_desc = container_of(sdesc,
struct sh_dmae_desc, shdma_desc);
- return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
- sh_chan->xmit_shift;
+ return sh_desc->hw.tcr -
+ (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
/* Called from error IRQ or NMI */
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d6708..6e8887fe 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,6 +323,7 @@
astbo->gem.driver_private = NULL;
astbo->bo.bdev = &ast->ttm.bdev;
+ astbo->bo.bdev->dev_mapping = dev->dev_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012..69fd8f1 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -328,6 +328,7 @@
cirrusbo->gem.driver_private = NULL;
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+ cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce78..f92da0a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
+ if (delta_ns < 0)
+ etime = ktime_add_ns(etime, -delta_ns);
+ else
+ etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e3660..3bc84145 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
udelay(15);
if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
SDVO_I2C_CMD_STATUS,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a52..9e65783 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@
struct sg_table *sg,
enum dma_data_direction dir)
{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
+
+ i915_gem_object_unpin_pages(obj);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc..53cddd9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -1856,10 +1858,16 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5fb3058..be79f47 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8269,9 +8269,11 @@
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
+ enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config)
+ if (encoder->get_config &&
+ encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@@ -10040,6 +10042,8 @@
u32 power_well_driver;
+ int num_transcoders;
+
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -10048,16 +10052,7 @@
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
- enum transcoder cpu_transcoder;
- u32 conf;
u32 source;
-
- u32 htotal;
- u32 hblank;
- u32 hsync;
- u32 vtotal;
- u32 vblank;
- u32 vsync;
} pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
@@ -10069,6 +10064,19 @@
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
};
struct intel_display_error_state *
@@ -10076,9 +10084,17 @@
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
- enum transcoder cpu_transcoder;
+ int transcoders[] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
int i;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
@@ -10087,9 +10103,6 @@
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
- error->pipe[i].cpu_transcoder = cpu_transcoder;
-
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10113,14 +10126,25 @@
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
/* In the code above we read the registers without checking if the power
@@ -10142,22 +10166,16 @@
{
int i;
+ if (!error)
+ return;
+
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
- err_printf(m, " CPU transcoder: %c\n",
- transcoder_name(error->pipe[i].cpu_transcoder));
- err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
- err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
- err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
- err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
- err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
- err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
- err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
@@ -10178,5 +10196,17 @@
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, " CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+ }
}
#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 67e2c1f..5950888 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -497,8 +497,11 @@
goto out;
}
- /* scale to hardware */
- level = level * freq / max;
+ /* scale to hardware, but be careful to not overflow */
+ if (freq < max)
+ level = level * freq / max;
+ else
+ level = freq / max * level;
dev_priv->backlight.level = level;
if (dev_priv->backlight.device)
@@ -515,6 +518,17 @@
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
+ /*
+ * Do not disable backlight on the vgaswitcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This will leave the backlight on unnecessarily when
+ * another client is not activated.
+ */
+ if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ return;
+ }
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f895d15..b0e4a0b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5063,8 +5063,26 @@
}
} else {
if (enable_requested) {
+ unsigned long irqflags;
+ enum pipe p;
+
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+ /*
+ * After this, the registers on the pipes that are part
+ * of the power well will become zero, so we have to
+ * adjust our counters according to that.
+ *
+ * FIXME: Should we do this in general in
+ * drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->last_vblank[p] = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d..079ef01 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,6 +968,18 @@
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}
static int
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 13878d5..d70e4a9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -323,6 +323,7 @@
mgabo->gem.driver_private = NULL;
mgabo->bo.bdev = &mdev->ttm.bdev;
+ mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d829172..7a4e089 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@
u32 splitoff;
u32 s, e;
+ BUG_ON(!type);
+
list_for_each_entry(this, &mm->free, fl_entry) {
e = this->offset + this->length;
s = this->offset;
@@ -162,6 +164,8 @@
struct nouveau_mm_node *prev, *this, *next;
u32 mask = align - 1;
+ BUG_ON(!type);
+
list_for_each_entry_reverse(this, &mm->free, fl_entry) {
u32 e = this->offset + this->length;
u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d550226..9d2cd20 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+ nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
#define nouveau_mc_destroy(p) ({ \
struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
})
@@ -33,7 +33,8 @@
})
int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
+ struct nouveau_oclass *, const struct nouveau_mc_intr *,
+ int, void **);
void _nouveau_mc_dtor(struct nouveau_object *);
int _nouveau_mc_init(struct nouveau_object *);
int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a..ab7ef0a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@
return ret;
switch (pfb914 & 0x00000003) {
- case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
case 0x00000003: break;
}
- pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- pfb->ram->tags = nv_rd32(pfb, 0x100320);
+ ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ ram->tags = nv_rd32(pfb, 0x100320);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6..63a6aab 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@
if (ret)
return ret;
- pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- pfb->ram->type = NV_MEM_TYPE_STOLEN;
+ ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->type = NV_MEM_TYPE_STOLEN;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883..cce65cc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@
struct nouveau_ltcg base;
u32 part_nr;
u32 subp_nr;
- struct nouveau_mm tags;
u32 num_tags;
+ u32 tag_base;
+ struct nouveau_mm tags;
struct nouveau_mm_node *tag_ram;
};
@@ -117,10 +118,6 @@
u32 tag_size, tag_margin, tag_align;
int ret;
- nv_wr32(priv, 0x17e8d8, priv->part_nr);
- if (nv_device(pfb)->card_type >= NV_E0)
- nv_wr32(priv, 0x17e000, priv->part_nr);
-
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram->size >> 17) / 4;
if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@
tag_size += tag_align;
tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+ ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
&priv->tag_ram);
if (ret) {
priv->num_tags = 0;
@@ -152,7 +149,7 @@
tag_base += tag_align - 1;
ret = do_div(tag_base, tag_align);
- nv_wr32(priv, 0x17e8d4, tag_base);
+ priv->tag_base = tag_base;
}
ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
@@ -182,8 +179,6 @@
}
priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
- nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
ret = nvc0_ltcg_init_tag_ram(pfb, priv);
if (ret)
return ret;
@@ -209,13 +204,32 @@
nouveau_ltcg_destroy(ltcg);
}
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+ struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ int ret;
+
+ ret = nouveau_ltcg_init(ltcg);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+ nv_wr32(priv, 0x17e8d8, priv->part_nr);
+ if (nv_device(ltcg)->card_type >= NV_E0)
+ nv_wr32(priv, 0x17e000, priv->part_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
+}
+
struct nouveau_oclass
nvc0_ltcg_oclass = {
.handle = NV_SUBDEV(LTCG, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_ltcg_ctor,
.dtor = nvc0_ltcg_dtor,
- .init = _nouveau_ltcg_init,
+ .init = nvc0_ltcg_init,
.fini = _nouveau_ltcg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b..ec9cd6f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@
int
nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass,
+ const struct nouveau_mc_intr *intr_map,
+ int length, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@
if (ret)
return ret;
+ pmc->intr_map = intr_map;
+
ret = request_irq(device->pdev->irq, nouveau_mc_intr,
IRQF_SHARED, "nouveau", pmc);
if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c76971..64aa4ed 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@
struct nv04_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 5191937..d989178 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@
struct nv44_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index f25fc5f..2b1afe2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -53,12 +53,11 @@
struct nv50_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv50_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21..0d57b4d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@
struct nv98_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv98_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3ba..104175c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@
struct nvc0_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nvc0_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2..6a13ffb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -606,6 +606,24 @@
regp->ramdac_a34 = 0x1;
}
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nv04_display *disp = nv04_display(crtc->dev);
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+ }
+
+ return ret;
+}
+
/**
* Sets up registers for the given mode/adjusted_mode pair.
*
@@ -622,10 +640,15 @@
struct drm_device *dev = crtc->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_drm *drm = nouveau_drm(dev);
+ int ret;
NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(adjusted_mode);
+ ret = nv_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
@@ -722,6 +745,7 @@
static void nv_crtc_destroy(struct drm_crtc *crtc)
{
+ struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (!nv_crtc)
@@ -729,6 +753,10 @@
drm_crtc_cleanup(crtc);
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +782,16 @@
}
static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+ struct nv04_display *disp = nv04_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
+static void
nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
uint32_t size)
{
@@ -791,7 +829,6 @@
struct drm_framebuffer *drm_fb;
struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
- int ret;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -801,10 +838,8 @@
return 0;
}
-
/* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
+ * now we update pointers to do that.
*/
if (atomic) {
drm_fb = passed_fb;
@@ -812,17 +847,6 @@
} else {
drm_fb = crtc->fb;
fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ int ret = nv_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
@@ -1027,6 +1054,7 @@
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.load_lut = nv_crtc_gamma_load,
+ .disable = nv_crtc_disable,
};
int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031d..9928187 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
struct nouveau_object *core;
+ struct nouveau_bo *image[2];
};
static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20e..a03e75d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -577,6 +577,9 @@
ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret)
goto fail_unreserve;
+ } else {
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd..625f80d 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@
if (clk < pll->vco1.max_freq)
pll->vco2.max_freq = 0;
- pclk->pll_calc(pclk, pll, clk, &coef);
+ ret = pclk->pll_calc(pclk, pll, clk, &coef);
if (ret == 0)
return -ERANGE;
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0bfd55e..9953e1f 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2548,9 +2548,6 @@
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2633,16 +2630,7 @@
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dacec4..8928bd1 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2587,9 +2587,11 @@
if (rdev->wb.enabled) {
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
rptr = RREG32(CP_HQD_PQ_RPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
@@ -2604,9 +2606,11 @@
if (rdev->wb.enabled) {
wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
wptr = RREG32(CP_HQD_PQ_WPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
@@ -2897,6 +2901,7 @@
WREG32(CP_CPF_DEBUG, tmp);
/* init the pipes */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
@@ -2919,6 +2924,7 @@
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
for (i = 0; i < 2; i++) {
@@ -2972,6 +2978,7 @@
mqd->static_thread_mgmt23[0] = 0xffffffff;
mqd->static_thread_mgmt23[1] = 0xffffffff;
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, rdev->ring[idx].me,
rdev->ring[idx].pipe,
rdev->ring[idx].queue, 0);
@@ -3099,6 +3106,7 @@
WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -4320,6 +4328,7 @@
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < 16; i++) {
cik_srbm_select(rdev, 0, 0, 0, i);
/* CP and shaders */
@@ -4335,6 +4344,7 @@
/* XXX SDMA RLC - todo */
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -5954,6 +5964,8 @@
struct radeon_ring *ring;
int r;
+ cik_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5985,7 +5997,6 @@
if (r)
return r;
- cik_mc_program(rdev);
r = cik_pcie_gart_enable(rdev);
if (r)
return r;
@@ -6194,7 +6205,7 @@
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
cik_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -6358,6 +6369,7 @@
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -6978,7 +6990,7 @@
/* program the VCPU memory controller bits 0-27 */
addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
WREG32(UVD_VCPU_CACHE_SIZE0, size);
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9bcdd17..7e5d0b5 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2038,9 +2038,6 @@
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2092,16 +2089,7 @@
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 038dcac..d5b49e3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5106,6 +5106,8 @@
/* enable aspm */
evergreen_program_aspm(rdev);
+ evergreen_mc_program(rdev);
+
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
@@ -5133,7 +5135,6 @@
if (r)
return r;
- evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -5291,10 +5292,10 @@
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -5429,6 +5430,7 @@
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index bb9ea36..b0e2800 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -148,18 +148,40 @@
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
* number (coefficient of two integer numbers). DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
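
The wallclock-ratio table added in this hunk scales the audio DTO phase up in 24/48/96/192 kHz steps as the pixel clock grows, while the modulo is simply the pixel clock itself. Below is a minimal standalone sketch of that arithmetic, assuming the clock argument is in kHz as the driver appears to pass it; register programming is deliberately left out.

#include <stdio.h>

/* Reproduce the phase/modulo/ratio selection for an example clock.
 * Illustrative only; the real code writes these into the DTO registers. */
static void dto_for_clock(unsigned int clock)
{
	const unsigned int base_rate = 24000;	/* 24 MHz, expressed in kHz */
	unsigned int max_ratio = clock / base_rate;
	unsigned int dto_modulo = clock;
	unsigned int dto_phase, wallclock_ratio;

	if (max_ratio >= 8) {
		dto_phase = 192 * 1000;
		wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		dto_phase = 96 * 1000;
		wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		dto_phase = 48 * 1000;
		wallclock_ratio = 1;
	} else {
		dto_phase = 24 * 1000;
		wallclock_ratio = 0;
	}

	printf("clock=%u phase=%u modulo=%u wallclock_ratio=%u\n",
	       clock, dto_phase, dto_modulo, wallclock_ratio);
}

int main(void)
{
	dto_for_clock(148500);	/* hypothetical 148.5 MHz pixel clock */
	return 0;
}
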
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a7baf67..0d582ac 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -497,6 +497,9 @@
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 56bd4f3..ccb4f8b5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -794,9 +794,13 @@
if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
@@ -2079,6 +2083,8 @@
/* enable aspm */
evergreen_program_aspm(rdev);
+ evergreen_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
@@ -2107,7 +2113,6 @@
if (r)
return r;
- evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
@@ -2286,7 +2291,7 @@
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -2418,6 +2423,7 @@
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 4f9b9bc..f0f5f74 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -4067,9 +4067,6 @@
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -4162,16 +4159,7 @@
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -4188,8 +4176,7 @@
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 10f712e..e66e720 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2299,9 +2299,13 @@
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -2697,12 +2701,29 @@
return 0;
}
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
+void r600_uvd_stop(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
/* force RBC into idle state */
WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
ring->ready = false;
}
@@ -2722,6 +2743,11 @@
/* disable interrupt */
WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
/* put LMI, VCPU, RBC etc... into reset */
WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2751,10 +2777,6 @@
WREG32(UVD_MPC_SET_ALU, 0);
WREG32(UVD_MPC_SET_MUX, 0x88);
- /* Stall UMC */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
/* take all subblocks out of reset, except VCPU */
WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
mdelay(5);
@@ -3312,6 +3334,8 @@
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
+ r600_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -3324,7 +3348,6 @@
if (r)
return r;
- r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f48240b..f264df5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
* doesn't matter which one you use. Just use the first one.
*/
@@ -242,9 +261,21 @@
/* according to the reg specs, this should be DCE3.2 only, but in
* practice it seems to cover DCE3.0 as well.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ if (dig->dig_encoder == 0) {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8e3fe81..7c78083 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -933,6 +933,9 @@
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f08219..9f19259 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1468,7 +1468,6 @@
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
- unsigned fw_size;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -2066,6 +2065,7 @@
const struct firmware *mec_fw; /* CIK MEC firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
+ const struct firmware *uvd_fw; /* UVD firmware */
struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
@@ -2095,6 +2095,8 @@
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
+ /* srbm instance registers */
+ struct mutex srbm_mutex;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2161,7 +2163,7 @@
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
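
The one-character WREG32_OR change above is a macro-hygiene fix: without parentheses, the bitwise NOT binds only to the first token of whatever expression the caller passes as the mask. A small self-contained demonstration with invented macro names, assuming 32-bit int for the hex values in the comment:

#include <assert.h>

#define MASK_OLD(or) (~or)	/* '~' applies to the first token only */
#define MASK_NEW(or) (~(or))	/* '~' covers the whole argument */

int main(void)
{
	/* With a compound argument the expansions disagree:
	 *   MASK_OLD(0x1 | 0x2) -> (~0x1 | 0x2) == 0xfffffffe
	 *   MASK_NEW(0x1 | 0x2) -> ~(0x1 | 0x2) == 0xfffffffc */
	assert(MASK_OLD(0x1 | 0x2) != MASK_NEW(0x1 | 0x2));
	return 0;
}
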
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 902479f..3d61d5a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -441,7 +441,7 @@
/* uvd */
int r600_uvd_init(struct radeon_device *rdev);
int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e3..63398ae 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1163,6 +1163,7 @@
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
+ mutex_init(&rdev->srbm_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1519,6 +1520,7 @@
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1566,7 @@
}
}
+ radeon_pm_resume(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ddb0ef..ddb8f8e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -782,7 +782,7 @@
} else {
/* put fence directly behind firmware */
- index = ALIGN(rdev->uvd.fw_size, 8);
+ index = ALIGN(rdev->uvd_fw->size, 8);
rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6a51d94..b990b1a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@
if (rdev->gart.robj == NULL) {
return;
}
- radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c46..c557850 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1176,7 +1176,14 @@
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- if (radeon_dpm == 1)
+ /* DPM requires the RLC, RV770+ dGPU requires SMC */
+ if (!rdev->rlc_fw)
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if ((rdev->family >= CHIP_RV770) &&
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!rdev->smc_fw))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (radeon_dpm == 1)
rdev->pm.pm_method = PM_METHOD_DPM;
else
rdev->pm.pm_method = PM_METHOD_PROFILE;
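
The radeon_pm hunk above makes DPM selection depend on the firmware that actually loaded: no RLC firmware means no DPM, and RV770-or-newer discrete parts additionally need the SMC image, which the request_firmware changes elsewhere in this patch now treat as optional. A sketch of that decision chain follows; the helper name and flat bool parameters are hypothetical, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

enum pm_method { PM_METHOD_PROFILE, PM_METHOD_DPM };

/* Mirrors the if/else chain in the hunk above. */
static enum pm_method pick_pm_method(bool has_rlc_fw, bool has_smc_fw,
				     bool is_igp, bool family_ge_rv770,
				     int radeon_dpm_param)
{
	if (!has_rlc_fw)
		return PM_METHOD_PROFILE;		/* DPM needs the RLC  */
	if (family_ge_rv770 && !is_igp && !has_smc_fw)
		return PM_METHOD_PROFILE;		/* dGPU DPM needs SMC */
	return radeon_dpm_param == 1 ? PM_METHOD_DPM : PM_METHOD_PROFILE;
}

int main(void)
{
	printf("%d\n", pick_pm_method(true, true, false, true, 1));	/* DPM     */
	printf("%d\n", pick_pm_method(true, false, false, true, 1));	/* PROFILE */
	return 0;
}
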
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 414fd14..b79f4f5 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,7 +56,6 @@
int radeon_uvd_init(struct radeon_device *rdev)
{
- const struct firmware *fw;
unsigned long bo_size;
const char *fw_name;
int i, r;
@@ -105,14 +104,14 @@
return -EINVAL;
}
- r = request_firmware(&fw, fw_name, rdev->dev);
+ r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
if (r) {
dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
fw_name);
return r;
}
- bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -145,12 +144,6 @@
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- rdev->uvd.fw_size = fw->size;
- memset(rdev->uvd.cpu_addr, 0, bo_size);
- memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
-
- release_firmware(fw);
-
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
rdev->uvd.filp[i] = NULL;
@@ -174,33 +167,60 @@
}
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+ release_firmware(rdev->uvd_fw);
}
int radeon_uvd_suspend(struct radeon_device *rdev)
{
unsigned size;
+ void *ptr;
+ int i;
if (rdev->uvd.vcpu_bo == NULL)
return 0;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&rdev->uvd.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_UVD_HANDLES)
+ return 0;
+
size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+ memcpy(rdev->uvd.saved_bo, ptr, size);
return 0;
}
int radeon_uvd_resume(struct radeon_device *rdev)
{
+ unsigned size;
+ void *ptr;
+
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
if (rdev->uvd.saved_bo != NULL) {
- unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
- memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+ memcpy(ptr, rdev->uvd.saved_bo, size);
kfree(rdev->uvd.saved_bo);
rdev->uvd.saved_bo = NULL;
- }
+ } else
+ memset(ptr, 0, size);
return 0;
}
@@ -215,8 +235,8 @@
{
int i, r;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (rdev->uvd.filp[i] == filp) {
- uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
r = radeon_uvd_get_destroy_msg(rdev,
@@ -336,9 +356,19 @@
return -EINVAL;
}
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
r = radeon_bo_kmap(bo, &ptr);
- if (r)
+ if (r) {
+ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
return r;
+ }
msg = ptr + offset;
@@ -364,8 +394,14 @@
radeon_bo_kunmap(bo);
return 0;
} else {
- /* it's a create msg, no special handling needed */
radeon_bo_kunmap(bo);
+
+ if (msg_type != 0) {
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
+ }
+
+ /* it's a create msg, no special handling needed */
}
/* create or decode, validate the handle */
@@ -388,7 +424,7 @@
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
int data0, int data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_reloc *reloc;
@@ -417,7 +453,7 @@
if (cmd < 0x4) {
if ((end - start) < buf_sizes[cmd]) {
- DRM_ERROR("buffer to small (%d / %d)!\n",
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
(unsigned)(end - start), buf_sizes[cmd]);
return -EINVAL;
}
@@ -442,9 +478,17 @@
}
if (cmd == 0) {
+ if (*has_msg_cmd) {
+ DRM_ERROR("More than one message in a UVD-IB!\n");
+ return -EINVAL;
+ }
+ *has_msg_cmd = true;
r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
if (r)
return r;
+ } else if (!*has_msg_cmd) {
+ DRM_ERROR("Message needed before other commands are send!\n");
+ return -EINVAL;
}
return 0;
@@ -453,7 +497,8 @@
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int *data0, int *data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[],
+ bool *has_msg_cmd)
{
int i, r;
@@ -467,7 +512,8 @@
*data1 = p->idx;
break;
case UVD_GPCOM_VCPU_CMD:
- r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ r = radeon_uvd_cs_reloc(p, *data0, *data1,
+ buf_sizes, has_msg_cmd);
if (r)
return r;
break;
@@ -488,6 +534,9 @@
struct radeon_cs_packet pkt;
int r, data0 = 0, data1 = 0;
+ /* does the IB have a msg command? */
+ bool has_msg_cmd = false;
+
/* minimum buffer sizes */
unsigned buf_sizes[] = {
[0x00000000] = 2048,
@@ -514,8 +563,8 @@
return r;
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
- r = radeon_uvd_cs_reg(p, &pkt, &data0,
- &data1, buf_sizes);
+ r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+ buf_sizes, &has_msg_cmd);
if (r)
return r;
break;
@@ -527,6 +576,12 @@
return -EINVAL;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+ if (!has_msg_cmd) {
+ DRM_ERROR("UVD-IBs need a msg command!\n");
+ return -EINVAL;
+ }
+
return 0;
}
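
The UVD suspend/resume rework above keeps the firmware image around in rdev->uvd_fw, so suspend only snapshots the part of the VCPU buffer that follows the firmware, and resume re-copies the firmware and then either restores or zeroes the remainder. A small sketch of that layout arithmetic, with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned long bo_size = 8UL << 20;	/* hypothetical VCPU BO size  */
	unsigned long fw_size = 256UL << 10;	/* hypothetical firmware size */
	unsigned long saved   = bo_size - fw_size;

	printf("firmware region     : [0, %lu)\n", fw_size);
	printf("saved across suspend: [%lu, %lu), %lu bytes\n",
	       fw_size, bo_size, saved);
	return 0;
}
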
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 363018c..bdd888b 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1944,9 +1944,7 @@
int rv6xx_dpm_init(struct radeon_device *rdev)
{
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
+ struct radeon_atom_ss ss;
struct atom_clock_dividers dividers;
struct rv6xx_power_info *pi;
int ret;
@@ -1989,16 +1987,18 @@
pi->gfx_clock_gating = true;
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ /* Disable sclk ss, causes hangs on a lot of systems */
+ pi->sclk_ss = false;
+
+ if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
+ else
pi->dynamic_ss = false;
- }
pi->dynamic_pcie_gen2 = true;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 30ea14e..f5e92cf 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
@@ -813,7 +813,7 @@
/* program the VCPU memory controller bits 0-27 */
addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
WREG32(UVD_VCPU_CACHE_SIZE0, size);
@@ -1829,6 +1829,8 @@
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
+ rv770_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1841,7 +1843,6 @@
if (r)
return r;
- rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1983,6 +1984,7 @@
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 2d34792..094c67a 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2319,12 +2319,25 @@
return 0;
}
+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct radeon_atom_ss ss;
+
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+ pi->dynamic_ss = false;
+}
+
int rv770_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2369,16 +2382,7 @@
pi->mvdd_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = false;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = RV770_HASI_DFLT;
@@ -2393,8 +2397,7 @@
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a..9244eff 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@
void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);
/* smc */
int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6ca9046..daa8d2d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1663,9 +1663,13 @@
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -6418,6 +6422,8 @@
/* enable aspm */
si_program_aspm(rdev);
+ si_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
@@ -6437,7 +6443,6 @@
if (r)
return r;
- si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
@@ -6621,7 +6626,7 @@
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
}
si_irq_suspend(rdev);
@@ -6763,8 +6768,10 @@
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- if (rdev->has_uvd)
+ if (rdev->has_uvd) {
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
+ }
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 4182557..88699e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2903,7 +2903,8 @@
{
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
int i;
@@ -2911,8 +2912,11 @@
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
- else
- disable_mclk_switching = false;
+
+ if (rps->vclk || rps->dclk) {
+ disable_mclk_switching = true;
+ disable_sclk_switching = true;
+ }
if (rdev->pm.dpm.ac_power)
max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2940,27 +2944,43 @@
if (disable_mclk_switching) {
mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
} else {
- sclk = ps->performance_levels[0].sclk;
mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[0].vddci;
}
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+ vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+ } else {
+ sclk = ps->performance_levels[0].sclk;
+ vddc = ps->performance_levels[0].vddc;
+ }
+
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
ps->performance_levels[0].vddc = vddc;
ps->performance_levels[0].vddci = vddci;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
- ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
- if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[0].sclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (sclk < ps->performance_levels[i].sclk)
+ sclk = ps->performance_levels[i].sclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].sclk = sclk;
+ ps->performance_levels[i].vddc = vddc;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+ ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+ if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ }
}
if (disable_mclk_switching) {
@@ -6253,9 +6273,6 @@
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
u32 mask;
@@ -6346,16 +6363,7 @@
si_pi->vddc_phase_shed_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -6366,8 +6374,7 @@
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
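
When UVD clocks are requested, the si_dpm hunk above disables both mclk and sclk switching and forces every performance level to the highest sclk/vddc found in the state, so the clocks never have to change while decode is active. A standalone sketch of the clamping loop with invented level values:

#include <stdio.h>

int main(void)
{
	unsigned int sclk[3] = { 30000, 60000, 85000 };	/* per-level sclk */
	unsigned int max = sclk[0];
	int i;

	for (i = 1; i < 3; i++)
		if (sclk[i] > max)
			max = sclk[i];		/* find the highest level */
	for (i = 0; i < 3; i++)
		sclk[i] = max;			/* pin every level to it  */

	for (i = 0; i < 3; i++)
		printf("level %d sclk %u\n", i, sclk[i]);
	return 0;
}
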
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7a57648..cd33084 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -488,8 +488,6 @@
if (djrcv_dev->querying_devices)
return 0;
- djrcv_dev->querying_devices = true;
-
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f34bca..6099f50 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -215,7 +215,7 @@
u16 value)
{
return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+ || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
}
static void adt7470_init_client(struct i2c_client *client)
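
The adt7470 change swaps a logical AND for a logical OR when chaining the two byte writes of a 16-bit register. The SMBus helpers return 0 on success and a negative errno on failure, so the AND version short-circuited after a successful low-byte write and never wrote the high byte. A toy demonstration of the short-circuit behaviour, with fake_write standing in for the SMBus call:

#include <stdio.h>

static int fake_write(const char *what, int rc)
{
	printf("  write %s -> %d\n", what, rc);
	return rc;
}

int main(void)
{
	printf("chained with &&:\n");
	(void)(fake_write("low byte", 0) && fake_write("high byte", 0));
	/* high byte never written: 0 is falsy, so && stops early */

	printf("chained with ||:\n");
	(void)(fake_write("low byte", 0) || fake_write("high byte", 0));
	/* both writes run; a non-zero result signals failure */
	return 0;
}
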
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index ccec916..af8f65f 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -246,9 +246,9 @@
bus_frequency = KEMPLD_I2C_FREQ_MAX;
if (pld->info.spec_major == 1)
- prescale = pld->pld_clock / bus_frequency * 5 - 1000;
+ prescale = pld->pld_clock / (bus_frequency * 5) - 1000;
else
- prescale = pld->pld_clock / bus_frequency * 4 - 3000;
+ prescale = pld->pld_clock / (bus_frequency * 4) - 3000;
if (prescale < 0)
prescale = 0;
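
The kempld change is a precedence fix: the intent is to divide the PLD clock by the product of bus frequency and divider, not to divide by the frequency and then multiply. A quick demonstration with hypothetical numbers, not the real clock values:

#include <stdio.h>

int main(void)
{
	int clock = 1000, freq = 10;

	printf("%d\n", clock / freq * 5);	/* 500: old, wrong grouping */
	printf("%d\n", clock / (freq * 5));	/*  20: intended grouping   */
	return 0;
}
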
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index df8ff5a..e2e9a0d 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -493,7 +493,7 @@
* based on this empirical measurement and a lot of previous frobbing.
*/
i2c->cmd_err = 0;
- if (msg->len < 8) {
+ if (0) { /* disable PIO mode until a proper fix is made */
ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
if (ret)
mxs_i2c_reset(i2c);
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0ad208a..3ceac3e 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -60,7 +60,6 @@
{
unsigned int stepconfig;
int i, steps;
- u32 step_en;
/*
* There are 16 configurable steps and 8 analog input
@@ -86,8 +85,7 @@
adc_dev->channel_step[i] = steps;
steps++;
}
- step_en = get_adc_step_mask(adc_dev);
- am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
}
static const char * const chan_name_ain[] = {
@@ -142,10 +140,22 @@
int *val, int *val2, long mask)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- int i;
- unsigned int fifo1count, read;
+ int i, map_val;
+ unsigned int fifo1count, read, stepid;
u32 step = UINT_MAX;
bool found = false;
+ u32 step_en;
+ unsigned long timeout = jiffies + usecs_to_jiffies
+ (IDLE_TIMEOUT * adc_dev->channels);
+ step_en = get_adc_step_mask(adc_dev);
+ am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
+ /* Wait for ADC sequencer to complete sampling */
+ while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
+ if (time_after(jiffies, timeout))
+ return -EAGAIN;
+ }
+ map_val = chan->channel + TOTAL_CHANNELS;
/*
* When the sub-system is first enabled,
@@ -170,12 +180,16 @@
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
for (i = 0; i < fifo1count; i++) {
read = tiadc_readl(adc_dev, REG_FIFO1);
- if (read >> 16 == step) {
- *val = read & 0xfff;
+ stepid = read & FIFOREAD_CHNLID_MASK;
+ stepid = stepid >> 0x10;
+
+ if (stepid == map_val) {
+ read = read & FIFOREAD_DATA_MASK;
found = true;
+ *val = read;
}
}
- am335x_tsc_se_update(adc_dev->mfd_tscadc);
+
if (found == false)
return -EBUSY;
return IIO_VAL_INT;
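
The ADC read path above now decodes each FIFO word into a step/channel id in the upper bits and a 12-bit sample in the lower bits before matching it against the requested channel. A sketch of that decode; the mask values and bit positions are assumptions for illustration, the real definitions live in the ti_am335x_tscadc header.

#include <stdint.h>
#include <stdio.h>

#define FIFOREAD_CHNLID_MASK 0x000f0000	/* assumed: step id in bits 16..19 */
#define FIFOREAD_DATA_MASK   0x00000fff	/* assumed: sample in bits 0..11   */

int main(void)
{
	uint32_t word = 0x00050123;	/* hypothetical FIFO entry */
	uint32_t stepid = (word & FIFOREAD_CHNLID_MASK) >> 16;
	uint32_t sample = word & FIFOREAD_DATA_MASK;

	printf("step %u, sample 0x%03x\n", (unsigned)stepid, (unsigned)sample);
	return 0;
}
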
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ea8a414..0dd9bb8 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -127,12 +127,17 @@
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count)
- for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
- if (trig->subirqs[i].enabled) {
- trig->use_count++;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
generic_handle_irq(trig->subirq_base + i);
- }
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
}
EXPORT_SYMBOL(iio_trigger_poll);
@@ -146,19 +151,24 @@
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count)
- for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
- if (trig->subirqs[i].enabled) {
- trig->use_count++;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
handle_nested_irq(trig->subirq_base + i);
- }
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- trig->use_count--;
- if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
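
The trigger rework above replaces the per-consumer increment with a counter that is preloaded to CONFIG_IIO_CONSUMERS_PER_TRIGGER and decremented exactly once per consumer slot, whether the slot fires a handler or goes straight to iio_trigger_notify_done, so the count reliably returns to zero. A toy model of that accounting; the real code decrements asynchronously as each handler completes.

#include <stdio.h>

#define CONSUMERS 4	/* stands in for CONFIG_IIO_CONSUMERS_PER_TRIGGER */

int main(void)
{
	int enabled[CONSUMERS] = { 1, 0, 1, 0 };
	int use_count = 0;
	int i;

	if (use_count == 0) {
		use_count = CONSUMERS;	/* preload: one reference per slot */
		for (i = 0; i < CONSUMERS; i++) {
			if (enabled[i])
				printf("handle irq for consumer %d\n", i);
			use_count--;	/* every slot notifies done once */
		}
	}
	printf("use_count back to %d\n", use_count);
	return 0;
}
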
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e..c1cd569 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -232,7 +232,8 @@
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = adjd_s311_read_data(indio_dev, chan->address, val);
+ ret = adjd_s311_read_data(indio_dev,
+ ADJD_S311_DATA_REG(chan->address), val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index dc112a7..4296155 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -959,23 +959,21 @@
return r;
}
-static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- struct entry *e = hash_lookup(mq, oblock);
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ mutex_lock(&mq->lock);
+
+ e = hash_lookup(mq, oblock);
BUG_ON(!e || !e->in_cache);
del(mq, e);
e->in_cache = false;
push(mq, e);
-}
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- remove_mapping(mq, oblock);
mutex_unlock(&mq->lock);
}
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index efdc873..a985702 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -117,7 +117,7 @@
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
+ int ret = -EINVAL;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -157,7 +157,7 @@
break;
}
- return 0;
+ return ret;
}
static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index df4ada88..bd9405d 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1987,7 +1987,7 @@
#ifdef CONFIG_OF
static const struct of_device_id coda_dt_ids[] = {
- { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
+ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
{ .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
{ /* sentinel */ }
};
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 553d87e..fd6289d 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -784,6 +784,7 @@
}
*vfd = g2d_videodev;
vfd->lock = &dev->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 5296385..4f6dd42 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -344,7 +344,7 @@
pix_mp->num_planes = 2;
/* Set pixelformat to the format in which MFC
outputs the decoded frame */
- pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+ pix_mp->pixelformat = ctx->dst_fmt->fourcc;
pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
@@ -382,10 +382,16 @@
mfc_err("Unsupported format for source.\n");
return -EINVAL;
}
- if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
- mfc_err("Not supported format.\n");
+ if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+ mfc_err("Unknown codec\n");
return -EINVAL;
}
+ if (!IS_MFCV6(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_VP8) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ }
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt = find_format(f, MFC_FMT_RAW);
if (!fmt) {
@@ -411,7 +417,6 @@
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret = 0;
- struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_mp;
mfc_debug_enter();
@@ -425,54 +430,32 @@
goto out;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = find_format(f, MFC_FMT_RAW);
- if (!fmt) {
- mfc_err("Unsupported format for source.\n");
- return -EINVAL;
- }
- if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- } else if (IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
- ctx->dst_fmt = fmt;
- mfc_debug_leave();
- return ret;
- } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- mfc_err("Wrong type error for S_FMT : %d", f->type);
- return -EINVAL;
- }
- fmt = find_format(f, MFC_FMT_DEC);
- if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
- mfc_err("Unknown codec\n");
- ret = -EINVAL;
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
+ ret = 0;
goto out;
- }
- if (fmt->type != MFC_FMT_DEC) {
- mfc_err("Wrong format selected, you should choose "
- "format for decoding\n");
- ret = -EINVAL;
- goto out;
- }
- if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
- ctx->src_fmt = fmt;
- ctx->codec_mode = fmt->codec_mode;
- mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
- pix_mp->height = 0;
- pix_mp->width = 0;
- if (pix_mp->plane_fmt[0].sizeimage)
- ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
- else
- pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_DEC);
+ ctx->codec_mode = ctx->src_fmt->codec_mode;
+ mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+ pix_mp->height = 0;
+ pix_mp->width = 0;
+ if (pix_mp->plane_fmt[0].sizeimage)
+ ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+ else
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
DEF_CPB_SIZE;
- pix_mp->plane_fmt[0].bytesperline = 0;
- ctx->state = MFCINST_INIT;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ ctx->state = MFCINST_INIT;
+ ret = 0;
+ goto out;
+ } else {
+ mfc_err("Wrong type error for S_FMT : %d", f->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
out:
mfc_debug_leave();
return ret;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2549967..59e56f4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -906,6 +906,7 @@
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
@@ -930,6 +931,18 @@
return -EINVAL;
}
+ if (!IS_MFCV6(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ } else if (IS_MFCV6(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ }
+
if (fmt->num_planes != pix_fmt_mp->num_planes) {
mfc_err("failed to try output format\n");
return -EINVAL;
@@ -947,7 +960,6 @@
{
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
- struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int ret = 0;
@@ -960,13 +972,9 @@
goto out;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = find_format(f, MFC_FMT_ENC);
- if (!fmt) {
- mfc_err("failed to set capture format\n");
- return -EINVAL;
- }
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
ctx->state = MFCINST_INIT;
- ctx->dst_fmt = fmt;
ctx->codec_mode = ctx->dst_fmt->codec_mode;
ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
@@ -987,28 +995,8 @@
}
mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- fmt = find_format(f, MFC_FMT_RAW);
- if (!fmt) {
- mfc_err("failed to set output format\n");
- return -EINVAL;
- }
-
- if (!IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- } else if (IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
-
- if (fmt->num_planes != pix_fmt_mp->num_planes) {
- mfc_err("failed to set output format\n");
- ret = -EINVAL;
- goto out;
- }
- ctx->src_fmt = fmt;
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_RAW);
ctx->img_width = pix_fmt_mp->width;
ctx->img_height = pix_fmt_mp->height;
mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 4851cc2..c4ff973 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -726,7 +726,7 @@
*eedata = data;
*eedata_len = len;
- dev_config = (void *)eedata;
+ dev_config = (void *)*eedata;
switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
case 0:
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index cb69405..6e50707 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -303,6 +303,11 @@
dev->workqueue = 0;
+ /* init video transfer queues first of all */
+ /* to prevent oops in hdpvr_delete() on error paths */
+ INIT_LIST_HEAD(&dev->free_buff_list);
+ INIT_LIST_HEAD(&dev->rec_buff_list);
+
/* register v4l2_device early so it can be used for printks */
if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -325,10 +330,6 @@
if (!dev->workqueue)
goto error;
- /* init video transfer queues */
- INIT_LIST_HEAD(&dev->free_buff_list);
- INIT_LIST_HEAD(&dev->rec_buff_list);
-
dev->options = hdpvr_default_options;
if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -405,7 +406,7 @@
video_nr[atomic_inc_return(&dev_nr)]);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
- goto error;
+ goto reg_fail;
}
/* let the user know what node this device is now attached to */
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 8864436..7c5b860 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_USBTV
tristate "USBTV007 video capture support"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
---help---
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index bf43f87..91650173 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -57,7 +57,7 @@
#define USBTV_CHUNK_SIZE 256
#define USBTV_CHUNK 240
#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
- / 2 / USBTV_CHUNK)
+ / 4 / USBTV_CHUNK)
/* Chunk header. */
#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -89,6 +89,7 @@
/* Number of the currently processed frame, useful to find
* out when a new one begins. */
u32 frame_id;
+ int chunks_done;
int iso_size;
unsigned int sequence;
@@ -202,6 +203,26 @@
return 0;
}
+/* Copy data from chunk into a frame buffer, deinterlacing the data
+ * into every second line. Unfortunately, they don't align nicely into
+ * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
+ * Therefore, we break down the chunk into two halves before copyting,
+ * so that we can interleave a line if needed. */
+static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
+{
+ int half;
+
+ for (half = 0; half < 2; half++) {
+ int part_no = chunk_no * 2 + half;
+ int line = part_no / 3;
+ int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
+
+ u32 *dst = &frame[part_index * USBTV_CHUNK/2];
+ memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
+ src += USBTV_CHUNK/2;
+ }
+}
+
/* Called for each 256-byte image chunk.
* First word identifies the chunk, followed by 240 words of image
* data and padding. */
@@ -218,17 +239,17 @@
frame_id = USBTV_FRAME_ID(chunk);
odd = USBTV_ODD(chunk);
chunk_no = USBTV_CHUNK_NO(chunk);
-
- /* Deinterlace. TODO: Use interlaced frame format. */
- chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3;
- chunk_no += !odd * 3;
-
if (chunk_no >= USBTV_CHUNKS)
return;
/* Beginning of a frame. */
- if (chunk_no == 0)
+ if (chunk_no == 0) {
usbtv->frame_id = frame_id;
+ usbtv->chunks_done = 0;
+ }
+
+ if (usbtv->frame_id != frame_id)
+ return;
spin_lock_irqsave(&usbtv->buflock, flags);
if (list_empty(&usbtv->bufs)) {
@@ -241,19 +262,23 @@
buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
frame = vb2_plane_vaddr(&buf->vb, 0);
- /* Copy the chunk. */
- memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1],
- USBTV_CHUNK * sizeof(chunk[1]));
+ /* Copy the chunk data. */
+ usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
+ usbtv->chunks_done++;
/* Last chunk in a frame, signalling an end */
- if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) {
+ if (odd && chunk_no == USBTV_CHUNKS-1) {
int size = vb2_plane_size(&buf->vb, 0);
+ enum vb2_buffer_state state = usbtv->chunks_done ==
+ USBTV_CHUNKS ?
+ VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
buf->vb.v4l2_buf.sequence = usbtv->sequence++;
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
vb2_set_plane_payload(&buf->vb, 0, size);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb, state);
list_del(&buf->list);
}
@@ -518,7 +543,7 @@
if (*nbuffers < 2)
*nbuffers = 2;
*nplanes = 1;
- sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32);
+ sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
return 0;
}
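
The deinterlacing helper added above splits each 240-word chunk into two 120-word halves; three halves make one 720-pixel line (two pixels per word), and lines from the odd and even fields are interleaved in the destination frame. A standalone sketch of the index math for the first few chunks of an odd field:

#include <stdio.h>

#define USBTV_CHUNK 240

int main(void)
{
	int chunk_no, half, odd = 1;

	for (chunk_no = 0; chunk_no < 3; chunk_no++) {
		for (half = 0; half < 2; half++) {
			int part_no = chunk_no * 2 + half;
			int line = part_no / 3;
			int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

			printf("chunk %d half %d -> word offset %d\n",
			       chunk_no, half, part_index * USBTV_CHUNK / 2);
		}
	}
	return 0;
}
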
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d4..e48cb33 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3714,11 +3714,17 @@
* The bonding ndo_neigh_setup is called at init time before any
* slave exists. So we must declare a proxy setup function which will
* be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
*/
static int bond_neigh_setup(struct net_device *dev,
struct neigh_parms *parms)
{
- parms->neigh_setup = bond_neigh_init;
+ /* modify only our neigh_parms */
+ if (parms->dev == dev)
+ parms->neigh_setup = bond_neigh_init;
return 0;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8..925ab8e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@
if ((mc->ptr + rec_len) > mc->end)
goto decode_failed;
- memcpy(cf->data, mc->ptr, rec_len);
+ memcpy(cf->data, mc->ptr, cf->can_dlc);
mc->ptr += rec_len;
}
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121e..55d79cb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -199,7 +199,7 @@
struct arc_emac_priv *priv = netdev_priv(ndev);
unsigned int work_done;
- for (work_done = 0; work_done <= budget; work_done++) {
+ for (work_done = 0; work_done < budget; work_done++) {
unsigned int *last_rx_bd = &priv->last_rx_bd;
struct net_device_stats *stats = &priv->stats;
struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d80e34b..00b88cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1333,6 +1333,8 @@
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_TX_RESUME,
};
struct bnx2x_prev_path_list {
@@ -1502,6 +1504,7 @@
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1830,6 +1833,8 @@
int fp_array_size;
u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2456,6 @@
BNX2X_PCI_LINK_SPEED_5000 = 5000,
BNX2X_PCI_LINK_SPEED_8000 = 8000
};
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df4..fcf2761 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -30,10 +30,8 @@
#include "bnx2x_dcb.h"
/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@
bnx2x_pfc_clear(bp);
}
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_START;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
bnx2x_dcbx_fw_struct(bp, tx_params);
DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- bnx2x_dcbx_stop_hw_tx(bp);
+ set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
}
@@ -753,7 +775,13 @@
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
- bnx2x_dcbx_resume_hw_tx(bp);
+
+ /* ets may affect cmng configuration: reinit it in hw */
+ bnx2x_set_local_cmng(bp);
+
+ set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2363,21 +2391,24 @@
case DCB_FEATCFG_ATTR_PG:
if (bp->dcbx_local_feat.ets.enabled)
*flags |= DCB_FEATCFG_ENABLE;
- if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_PFC:
if (bp->dcbx_local_feat.pfc.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
- DCBX_LOCAL_PFC_MISMATCH))
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_APP:
if (bp->dcbx_local_feat.app.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
- DCBX_LOCAL_APP_MISMATCH))
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b..804b8f6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -199,4 +199,7 @@
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52..32767f6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1300,6 +1300,9 @@
#define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1372,6 +1375,8 @@
#define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e06186c..8bdc8b9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2261,6 +2261,23 @@
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
} else
BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2476,7 +2497,7 @@
input.port_rate = bp->link_vars.line_speed;
- if (cmng_type == CMNG_FNS_MINMAX) {
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
int vn;
/* read mf conf from shmem */
@@ -2533,6 +2554,21 @@
}
}
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+}
+
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2541,21 +2577,10 @@
bnx2x_link_update(&bp->link_params, &bp->link_vars);
+ bnx2x_init_dropless_fc(bp);
+
if (bp->link_vars.link_up) {
- /* dropless flow control */
- if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
- int port = BP_PORT(bp);
- u32 pause_enabled = 0;
-
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
- pause_enabled = 1;
-
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
- pause_enabled);
- }
-
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
@@ -2568,17 +2593,8 @@
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- if (bp->link_vars.link_up && bp->link_vars.line_speed) {
- int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
- if (cmng_fns != CMNG_FNS_NONE) {
- bnx2x_cmng_fns_init(bp, false, cmng_fns);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- } else
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode without fairness\n");
- }
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
@@ -9639,6 +9655,12 @@
&bp->sp_rtnl_state))
bnx2x_pf_set_vfs_vlan(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+ bnx2x_dcbx_resume_hw_tx(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -10362,6 +10384,10 @@
bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11137,6 +11163,9 @@
int tmp;
u32 cfg;
+ if (IS_VF(bp))
+ return 0;
+
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
tmp = BP_ABS_FUNC(bp);
@@ -11524,6 +11553,7 @@
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
+ sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12817,13 +12847,17 @@
bnx2x_dcbnl_update_applist(bp, true);
#endif
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
/* Close the interface - either directly or implicitly */
if (remove_netdev) {
unregister_netdev(dev);
} else {
rtnl_lock();
- if (netif_running(dev))
- bnx2x_close(dev);
+ dev_close(dev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861ef..ad83f4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1747,11 +1747,8 @@
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
- DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
- if (!IS_SRIOV(bp))
- return;
-
- REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -3084,8 +3081,9 @@
pci_disable_sriov(bp->pdev);
}
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3101,22 @@
return -EINVAL;
}
- if (!vf) {
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
vfidx);
return -EINVAL;
}
+ if (!*bulletin) {
+ BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3116,17 +3124,19 @@
struct ifla_vf_info *ivi)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
int rc;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- if (!mac_obj || !vlan_obj || !bulletin) {
+ mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
}
@@ -3183,11 +3193,11 @@
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3259,11 @@
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
@@ -3463,7 +3473,7 @@
alloc_mem_err:
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366ab..d63d132 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -221,7 +221,8 @@
* Statistics service functions
*/
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -518,7 +519,8 @@
*stats_comp = 0;
}
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
{
/* vfs travel through here as part of the statistics FSM, but no action
* is required
@@ -534,13 +536,34 @@
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+ bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_pmf_update(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_pmf_update(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_pmf_update(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@
*/
if (IS_VF(bp))
return;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
- spin_lock_bh(&bp->stats_lock);
cur_stats_counter = bp->stats_counter - 1;
- spin_unlock_bh(&bp->stats_lock);
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- if (bnx2x_edebug_stats_stopped(bp))
+ /* we run update from timer context, so give up
+ * if somebody is in the middle of transition
+ */
+ if (down_trylock(&bp->stats_sema))
return;
+ if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+ goto out;
+
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
- return;
+ goto out;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
- return;
+ goto out;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@
/* vf is done */
if (IS_VF(bp))
- return;
+ goto out;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+out:
+ up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@
{
int update = 0;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+
+ bp->stats_started = false;
+
bnx2x_stats_comp(bp);
if (bp->port.pmf)
@@ -1348,6 +1386,8 @@
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
+
+ up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
+ void (*action)(struct bnx2x *bp);
if (unlikely(bp->panic))
return;
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ action = bnx2x_stats_stm[state][event].action;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
+ action(bp);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ddebc7a..0da2214 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17796,8 +17796,10 @@
done:
if (state == pci_channel_io_perm_failure) {
- tg3_napi_enable(tp);
- dev_close(netdev);
+ if (netdev) {
+ tg3_napi_enable(tp);
+ dev_close(netdev);
+ }
err = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_disable_device(pdev);
@@ -17827,7 +17829,8 @@
rtnl_lock();
if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
goto done;
}
@@ -17835,7 +17838,7 @@
pci_restore_state(pdev);
pci_save_state(pdev);
- if (!netif_running(netdev)) {
+ if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
goto done;
}
@@ -17847,7 +17850,7 @@
rc = PCI_ERS_RESULT_RECOVERED;
done:
- if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+ if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
tg3_napi_enable(tp);
dev_close(netdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a..9c89dc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,11 +455,6 @@
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
- __free_pages(q->pg_chunk.page, order);
- q->pg_chunk.page = NULL;
- return -EIO;
- }
q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@
return flits_to_desc(flits);
}
-
-/* map_skb - map a packet main body and its page fragments
- * @pdev: the PCI device
- * @skb: the packet
- * @addr: placeholder to save the mapped addresses
- *
- * map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
- dma_addr_t *addr)
-{
- const skb_frag_t *fp, *end;
- const struct skb_shared_info *si;
-
- *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, *addr))
- goto out_err;
-
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
-
- for (fp = si->frags; fp < end; fp++) {
- *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
- DMA_TO_DEVICE);
- if (pci_dma_mapping_error(pdev, *addr))
- goto unwind;
- }
- return 0;
-
-unwind:
- while (fp-- > si->frags)
- dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
- DMA_TO_DEVICE);
-
- pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
- return -ENOMEM;
-}
-
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * make_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @sgp: the SGL to populate
* @start: start address of skb main body data to include in the SGL
* @len: length of skb main body data to include in the SGL
- * @addr: the list of the mapped addresses
+ * @pdev: the PCI device
*
- * Copies the scatter/gather list for the buffers that make up a packet
+ * Generates a scatter/gather list for the buffers that make up a packet
* and returns the SGL size in 8-byte words. The caller must size the SGL
* appropriately.
*/
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
struct sg_ent *sgp, unsigned char *start,
- unsigned int len, const dma_addr_t *addr)
+ unsigned int len, struct pci_dev *pdev)
{
- unsigned int i, j = 0, k = 0, nfrags;
+ dma_addr_t mapping;
+ unsigned int i, j = 0, nfrags;
if (len) {
+ mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
sgp->len[0] = cpu_to_be32(len);
- sgp->addr[j++] = cpu_to_be64(addr[k++]);
+ sgp->addr[0] = cpu_to_be64(mapping);
+ j = 1;
}
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
- sgp->addr[j] = cpu_to_be64(addr[k++]);
+ sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
++sgp;
@@ -1178,7 +1138,7 @@
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
- unsigned int compl, const dma_addr_t *addr)
+ unsigned int compl)
{
unsigned int flits, sgl_flits, cntrl, tso_info;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@
}
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+ sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@
struct netdev_queue *txq;
struct sge_qset *qs;
struct sge_txq *q;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
/*
* The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@
return NETDEV_TX_BUSY;
}
- if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@
if (likely(!skb_shared(skb)))
skb_orphan(skb);
- write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
check_ring_tx_db(adap, q);
return NETDEV_TX_OK;
}
@@ -1623,8 +1577,7 @@
*/
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
- unsigned int gen, unsigned int ndesc,
- const dma_addr_t *addr)
+ unsigned int gen, unsigned int ndesc)
{
unsigned int sgl_flits, flits;
struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
- skb_tail_pointer(skb) -
- skb_transport_header(skb), addr);
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@
goto again;
}
- if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
- spin_unlock(&q->lock);
- return NET_XMIT_SUCCESS;
- }
-
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
@@ -1720,7 +1668,7 @@
}
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
check_ring_tx_db(adap, q);
return NET_XMIT_SUCCESS;
}
@@ -1738,7 +1686,6 @@
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
- unsigned int written = 0;
spin_lock(&q->lock);
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@
break;
}
- if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
- break;
-
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
q->pidx += ndesc;
- written += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
@@ -1773,8 +1716,7 @@
__skb_unlink(skb, &q->sendq);
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
- (dma_addr_t *)skb->head);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
spin_lock(&q->lock);
}
spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
wmb();
- if (likely(written))
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a1..8ec5d74 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3048,6 +3048,9 @@
adapter->max_event_queues = le16_to_cpu(desc->eq_count);
adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+ /* Clear flags that driver is not interested in */
+ adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
}
err:
mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88..1b3b9e8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -563,6 +563,12 @@
BE_IF_FLAGS_MULTICAST = 0x1000
};
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+ BE_IF_FLAGS_UNTAGGED)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 181edb5..4559c35 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2563,8 +2563,8 @@
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- be_tx_compl_clean(adapter);
netif_tx_disable(netdev);
+ be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079..ef94a59 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@
}
/* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
- struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
- u64 map;
+ dma_addr_t map;
map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
- rd->dma_lo = map;
- rd->dma_hi = map >> 32;
+ if (pci_dma_mapping_error(skge->hw->pdev, map))
+ return -1;
+
+ rd->dma_lo = lower_32_bits(map);
+ rd->dma_hi = upper_32_bits(map);
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
+ return 0;
}
/* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
- skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+ if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@
BUG_ON(skge->dma & 7);
- if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+ if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
err = -EINVAL;
goto free_pci_mem;
@@ -2729,7 +2736,7 @@
struct skge_tx_desc *td;
int i;
u32 control, len;
- u64 map;
+ dma_addr_t map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(hw->pdev, map))
+ goto mapping_error;
+
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
- td->dma_lo = map;
- td->dma_hi = map >> 32;
+ td->dma_lo = lower_32_bits(map);
+ td->dma_hi = upper_32_bits(map);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, map))
+ goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
- tf->dma_lo = map;
- tf->dma_hi = (u64) map >> 32;
+ tf->dma_lo = lower_32_bits(map);
+ tf->dma_hi = upper_32_bits(map);
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
@@ -2815,6 +2827,26 @@
}
return NETDEV_TX_OK;
+
+mapping_unwind:
+ e = skge->tx_ring.to_use;
+ pci_unmap_single(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ while (i-- > 0) {
+ e = e->next;
+ pci_unmap_page(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ }
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
@@ -3045,11 +3077,13 @@
pci_dma_sync_single_for_cpu(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(e->skb, skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@
if (!nskb)
goto resubmit;
+ if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(nskb);
+ goto resubmit;
+ }
+
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
- skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de8..5472cbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
#include "mlx5_core.h"
enum {
- CMD_IF_REV = 4,
+ CMD_IF_REV = 5,
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcf..443cc4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@
case MLX5_EVENT_TYPE_PAGE_REQUEST:
{
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
- s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+ s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222..f012658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@
caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
caps->log_max_mcg = out->hca_cap.log_max_mcg;
- caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+ caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 748f10a..3e6670c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -55,33 +55,9 @@
};
static DEFINE_SPINLOCK(health_lock);
-
static LIST_HEAD(health_list);
static struct work_struct health_work;
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
- spin_lock_irq(&health_lock);
- if (reg_handler) {
- spin_unlock_irq(&health_lock);
- return -EEXIST;
- }
- reg_handler = handler;
- spin_unlock_irq(&health_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
- spin_lock_irq(&health_lock);
- reg_handler = NULL;
- spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
static void health_care(struct work_struct *work)
{
struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
+ /* nothing yet */
spin_lock_irq(&health_lock);
- if (reg_handler)
- reg_handler(dev->pdev, health->health,
- sizeof(health->health));
-
list_del_init(&health->list);
spin_unlock_irq(&health_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4a3e137..3a2408d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@
MLX5_PAGES_TAKE = 2
};
+enum {
+ MLX5_BOOT_PAGES = 1,
+ MLX5_INIT_PAGES = 2,
+ MLX5_POST_INIT_PAGES = 3
+};
+
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u32 func_id;
- s16 npages;
+ s32 npages;
struct work_struct work;
};
@@ -64,27 +70,23 @@
struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr;
- __be16 num_boot_pages;
+ __be16 rsvd;
__be16 func_id;
- __be16 init_pages;
- __be16 num_pages;
+ __be32 num_pages;
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
- __be16 rsvd0;
+ __be16 rsvd;
__be16 func_id;
- __be16 rsvd1;
- __be16 num_entries;
- u8 rsvd2[16];
+ __be32 num_entries;
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
- u8 rsvd0[2];
- __be16 num_entries;
- u8 rsvd1[20];
+ __be32 num_entries;
+ u8 rsvd[4];
__be64 pas[0];
};
@@ -146,7 +148,7 @@
}
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
- s16 *pages, s16 *init_pages, u16 *boot_pages)
+ s32 *npages, int boot)
{
struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out;
@@ -155,6 +157,8 @@
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+ in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
@@ -162,15 +166,7 @@
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
- if (pages)
- *pages = be16_to_cpu(out.num_pages);
-
- if (init_pages)
- *init_pages = be16_to_cpu(out.init_pages);
-
- if (boot_pages)
- *boot_pages = be16_to_cpu(out.num_boot_pages);
-
+ *npages = be32_to_cpu(out.num_pages);
*func_id = be16_to_cpu(out.func_id);
return err;
@@ -224,7 +220,7 @@
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be16(npages);
+ in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
@@ -292,7 +288,7 @@
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be16(npages);
+ in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
@@ -306,7 +302,7 @@
goto out_free;
}
- num_claimed = be16_to_cpu(out->num_entries);
+ num_claimed = be32_to_cpu(out->num_entries);
if (nclaimed)
*nclaimed = num_claimed;
@@ -345,7 +341,7 @@
}
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages)
+ s32 npages)
{
struct mlx5_pages_req *req;
@@ -364,20 +360,18 @@
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
- u16 uninitialized_var(boot_pages);
- s16 uninitialized_var(init_pages);
u16 uninitialized_var(func_id);
+ s32 uninitialized_var(npages);
int err;
- err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
- &boot_pages);
+ err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
if (err)
return err;
+ mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+ npages, boot ? "boot" : "init", func_id);
- mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
- init_pages, boot_pages, func_id);
- return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+ return give_pages(dev, func_id, npages, 0);
}
static int optimal_reclaimed_pages(void)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 92da998..9d4bb7f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3266,6 +3266,11 @@
u8 val;
int ret, max_sds_rings = adapter->max_sds_rings;
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
if (qlcnic_get_diag_lock(adapter)) {
netdev_info(netdev, "Device in diagnostics mode\n");
return -EBUSY;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 9f4b8d5..345d987 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -629,7 +629,8 @@
return -EIO;
}
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ee013fc..bc05d01 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2165,7 +2165,8 @@
if (err)
goto err_out_disable_mbx_intr;
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
pci_set_drvdata(pdev, adapter);
@@ -3085,7 +3086,8 @@
adapter->fw_fail_cnt = 0;
adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
if (!qlcnic_clr_drv_state(adapter))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10ed82b..660c3f5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -170,9 +170,9 @@
if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
- if (!err) {
- dev_info(&adapter->pdev->dev,
- "Failed to get current beacon state\n");
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state\n");
} else {
if (h_beacon_state == QLCNIC_BEACON_DISABLE)
ahw->beacon_state = 0;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 6f35f84..d2e5919 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -524,6 +524,7 @@
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
dev->stats.rx_dropped++;
+ kfree_skb(new_skb);
goto rx_next;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b5eb419..85e5c97 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7088,7 +7088,7 @@
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2a469b2..30d7442 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -675,7 +675,7 @@
BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+ rep_index = spec->type - EFX_FILTER_UC_DEF;
ins_index = rep_index;
spin_lock_bh(&state->lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index c9d942a..1ef9d8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -33,10 +33,15 @@
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
unsigned int entry = priv->cur_tx % txsize;
- struct dma_desc *desc = priv->dma_tx + entry;
+ struct dma_desc *desc;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax, len;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
else
@@ -54,7 +59,11 @@
STMMAC_RING_MODE);
wmb();
entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
+
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f2ccb36..0a9bb9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -939,15 +939,20 @@
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
GFP_KERNEL);
- if (unlikely(skb == NULL)) {
+ if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
- return 1;
+ return -ENOMEM;
}
skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+ pr_err("%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
p->des2 = priv->rx_skbuff_dma[i];
@@ -958,6 +963,16 @@
return 0;
}
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
@@ -965,13 +980,14 @@
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
unsigned int bfsize = 0;
+ int ret = -ENOMEM;
/* Set the max buffer size according to the DESC mode
* and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@
dma_extended_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct
dma_extended_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
- if ((!priv->dma_erx) || (!priv->dma_etx))
- return;
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
} else {
priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
- if ((!priv->dma_rx) || (!priv->dma_tx))
- return;
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
}
priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ goto err_rx_skbuff_dma;
+
priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@
else
p = priv->dma_rx + i;
- if (stmmac_init_rx_buffers(priv, p, i))
- break;
+ ret = stmmac_init_rx_buffers(priv, p, i);
+ if (ret)
+ goto err_init_rx_buffers;
if (netif_msg_probe(priv))
pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@
if (netif_msg_hw(priv))
stmmac_display_rings(priv);
+
+ return 0;
+err_init_rx_buffers:
+ while (--i >= 0)
+ stmmac_free_rx_buffers(priv, i);
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+ if (priv->extend_desc) {
+ dma_free_coherent(priv->device, priv->dma_tx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_etx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ } else {
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ }
+err_dma:
+ return ret;
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
- if (priv->rx_skbuff[i]) {
- dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
- }
- priv->rx_skbuff[i] = NULL;
- }
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffers(priv, i);
}
static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- init_dma_desc_rings(dev);
+
+ ret = init_dma_desc_rings(dev);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ goto dma_desc_error;
+ }
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
- pr_err("%s: DMA initialization failed\n", __func__);
+ pr_err("%s: DMA engine initialization failed\n", __func__);
goto init_error;
}
@@ -1672,6 +1744,7 @@
init_error:
free_dma_desc_resources(priv);
+dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
phy_error:
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41..d01cacf 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2100,7 +2100,7 @@
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_bytes += pkt_len;
stats->rx_packets++;
@@ -2884,6 +2884,7 @@
return ret;
err_iounmap:
+ netif_napi_del(&vptr->napi);
iounmap(regs);
err_free_dev:
free_netdev(netdev);
@@ -2904,6 +2905,7 @@
struct velocity_info *vptr = netdev_priv(netdev);
unregister_netdev(netdev);
+ netif_napi_del(&vptr->napi);
iounmap(vptr->mac_regs);
free_netdev(netdev);
velocity_nics--;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 51f2bc3..2dcc60f 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -210,8 +210,7 @@
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
pci_write_config_byte(pcidev,0x5a,0xc0);
WriteLPCReg(0x28, 0x70 );
- if (via_ircc_open(pcidev, &info, 0x3076) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3076);
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
@@ -249,8 +248,7 @@
info.irq=FirIRQ;
info.dma=FirDRQ1;
info.dma2=FirDRQ0;
- if (via_ircc_open(pcidev, &info, 0x3096) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3096);
} else
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d0f9c2f..16b43bf 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -739,6 +739,10 @@
return -EADDRNOTAVAIL;
}
+ if (data && data[IFLA_MACVLAN_FLAGS] &&
+ nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+ return -EINVAL;
+
if (data && data[IFLA_MACVLAN_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
case MACVLAN_MODE_PRIVATE:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a98fb0e..ea53abb 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -68,6 +68,8 @@
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvtap_queue *q = macvtap_get_queue(dev, skb);
- netdev_features_t features;
+ netdev_features_t features = TAP_FEATURES;
+
if (!q)
goto drop;
@@ -287,9 +290,11 @@
skb->dev = dev;
/* Apply the forward feature mask so that we perform segmentation
- * according to users wishes.
+ * according to the user's wishes. This only works if VNET_HDR is
+ * enabled.
*/
- features = netif_skb_features(skb) & vlan->tap_features;
+ if (q->flags & IFF_VNET_HDR)
+ features |= vlan->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
@@ -818,10 +823,13 @@
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
- if (vlan)
+ if (vlan) {
+ local_bh_disable();
macvlan_start_xmit(skb, vlan->dev);
- else
+ local_bh_enable();
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
return total_len;
@@ -912,8 +920,11 @@
done:
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
- if (vlan)
+ if (vlan) {
+ preempt_disable();
macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+ preempt_enable();
+ }
rcu_read_unlock();
return ret ? ret : copied;
@@ -1058,8 +1069,7 @@
/* tap_features are the same as features on tun/tap and
* reflect user expectations.
*/
- vlan->tap_features = vlan->dev->features &
- (feature_mask | ~TUN_OFFLOADS);
+ vlan->tap_features = feature_mask;
vlan->set_features = features;
netdev_update_features(vlan->dev);
@@ -1155,10 +1165,6 @@
TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
- /* TODO: only accept frames with the features that
- got enabled for forwarded frames */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
rtnl_lock();
ret = set_offload(q, arg);
rtnl_unlock();
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 8e7af83..138de83 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,7 +23,7 @@
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
-#define RTL8211E_INER_LINK_STAT 0x10
+#define RTL8211E_INER_LINK_STATUS 0x400
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
- RTL8211E_INER_LINK_STAT);
+ RTL8211E_INER_LINK_STATUS);
else
err = phy_write(phydev, RTL821x_INER, 0);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db690a3..71af122 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1074,8 +1074,9 @@
u32 rxhash;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > total_len)
+ if (len < sizeof(pi))
return -EINVAL;
+ len -= sizeof(pi);
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
@@ -1083,8 +1084,9 @@
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > total_len)
+ if (len < tun->vnet_hdr_sz)
return -EINVAL;
+ len -= tun->vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cba1d46..86292e6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2816,13 +2816,16 @@
static int hso_get_config_data(struct usb_interface *interface)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
- u8 config_data[17];
+ u8 *config_data = kmalloc(17, GFP_KERNEL);
u32 if_num = interface->altsetting->desc.bInterfaceNumber;
s32 result;
+ if (!config_data)
+ return -ENOMEM;
if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
0x86, 0xC0, 0, 0, config_data, 17,
USB_CTRL_SET_TIMEOUT) != 0x11) {
+ kfree(config_data);
return -EIO;
}
@@ -2873,6 +2876,7 @@
if (config_data[16] & 0x1)
result |= HSO_INFO_CRC_BUG;
+ kfree(config_data);
return result;
}
@@ -2886,6 +2890,11 @@
struct hso_shared_int *shared_int;
struct hso_device *tmp_dev = NULL;
+ if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+ dev_err(&interface->dev, "Not our interface\n");
+ return -ENODEV;
+ }
+
if_num = interface->altsetting->desc.bInterfaceNumber;
/* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@
else
port_spec = hso_get_config_data(interface);
- if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
- dev_err(&interface->dev, "Not our interface\n");
- return -ENODEV;
- }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
if (interface->num_altsetting > 1)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f4c6db4..767f7af 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1386,7 +1386,7 @@
return -ENOTCONN;
if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
- ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+ vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1793,8 +1793,6 @@
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
- flush_workqueue(vxlan_wq);
-
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 7365674..010b252 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -1406,11 +1406,8 @@
if (!priv->join_status)
goto done;
- if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
- wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
- priv->join_status);
- BUG_ON(1);
- }
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ goto done;
cancel_work_sync(&priv->update_filtering_work);
cancel_work_sync(&priv->set_beacon_wakeup_period_work);
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac07473..e509030 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@
data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);
kfree(addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b9b2bb5..f2ed62e 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4460,12 +4460,12 @@
* is killed. Hence update the killswitch state here. The
* rfkill handler will care about restarting if needed.
*/
- if (!test_bit(S_ALIVE, &il->status)) {
- if (hw_rf_kill)
- set_bit(S_RFKILL, &il->status);
- else
- clear_bit(S_RFKILL, &il->status);
+ if (hw_rf_kill) {
+ set_bit(S_RFKILL, &il->status);
+ } else {
+ clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ il_force_reset(il, true);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@
il->active_rate = RATES_MASK;
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
if (il_is_associated(il)) {
struct il_rxon_cmd *active_rxon =
(struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@
D_INFO("ALIVE processing complete.\n");
wake_up(&il->wait_command_queue);
- il_power_update_mode(il, true);
- D_INFO("Updated power mode\n");
-
return;
restart:
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 3195aad..b03e22e 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@
return 0;
}
+EXPORT_SYMBOL(il_force_reset);
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 822f1a0..3193872 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1068,7 +1068,10 @@
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (ctx->vif)
ieee80211_chswitch_done(ctx->vif, is_success);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a70c7b9..ff8cc75 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,8 +97,6 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
-#define APMG_RTC_INT_STT_RFKILL (0x10000000)
-
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index ad9bbca..7fd6fbf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -138,6 +138,20 @@
schedule_work(&mvm->roc_done_wk);
}
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+ return false;
+ if (errmsg)
+ IWL_ERR(mvm, "%s\n", errmsg);
+ ieee80211_connection_loss(vif);
+ return true;
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -163,8 +177,13 @@
* P2P Device discoverability, while there are other higher priority
* events in the system).
*/
- WARN_ONCE(!le32_to_cpu(notif->status),
- "Failed to schedule time event\n");
+ if (WARN_ONCE(!le32_to_cpu(notif->status),
+ "Failed to schedule time event\n")) {
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+ }
if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
IWL_DEBUG_TE(mvm,
@@ -180,14 +199,8 @@
* By now, we should have finished association
* and know the dtim period.
*/
- if (te_data->vif->type == NL80211_IFTYPE_STATION &&
- (!te_data->vif->bss_conf.assoc ||
- !te_data->vif->bss_conf.dtim_period)) {
- IWL_ERR(mvm,
- "No assocation and the time event is over already...\n");
- ieee80211_connection_loss(te_data->vif);
- }
-
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ "No assocation and the time event is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
} else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
te_data->running = true;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index f600e68..fd848cd 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -888,14 +888,6 @@
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill) {
- /*
- * Clear the interrupt in APMG if the NIC is going down.
- * Note that when the NIC exits RFkill (else branch), we
- * can't access prph and the NIC will be reset in
- * start_hw anyway.
- */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
- APMG_RTC_INT_STT_RFKILL);
set_bit(STATUS_RFKILL, &trans_pcie->status);
if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status))
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 96cfcdd..390e2f0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1502,16 +1502,16 @@
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
if (pci_enable_device(pdev)) {
err = -ENODEV;
goto out_no_pci;
}
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f20..b8ba1f9 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@
goto exit;
err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
+ memcpy(&ret, buf, sizeof(ret));
+
if (ret & 0x80) {
err = -EIO;
goto exit;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6bb7cf2..b10ba00 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@
mem = (unsigned long)
dt_alloc(size + 4, __alignof__(struct device_node));
+ memset((void *)mem, 0, size);
+
((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %lx...\n", mem);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dbdc5f7..01e264f 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -317,13 +317,20 @@
/* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{
- struct pci_dev * pci_dev;
- u64 addr;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ bool is_bridge;
+ u64 addr;
- pci_dev = to_pci_dev(dev);
+ /*
+ * pci_is_bridge() is not suitable here, because pci_dev->subordinate
+ * is set only after acpi_pci_find_device() has been called for the
+ * given device.
+ */
+ is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+ || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+ *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
if (!*handle)
return -ENODEV;
return 0;
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index c47fd1e..94716c7 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -278,6 +278,7 @@
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct sunxi_pinctrl_group *g = &pctl->groups[group];
+ unsigned long flags;
u32 val, mask;
u16 strength;
u8 dlevel;
@@ -295,22 +296,35 @@
* 3: 40mA
*/
dlevel = strength / 10 - 1;
+
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
pctl->membase + sunxi_dlevel_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
case PIN_CONFIG_BIAS_PULL_UP:
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_pull_reg(g->pin));
mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
pctl->membase + sunxi_pull_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
+ spin_lock_irqsave(&pctl->lock, flags);
+
val = readl(pctl->membase + sunxi_pull_reg(g->pin));
mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
pctl->membase + sunxi_pull_reg(g->pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
break;
default:
break;
@@ -360,11 +374,17 @@
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ u32 val, mask;
- u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
- u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ val = readl(pctl->membase + sunxi_mux_reg(pin));
+ mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
writel((val & ~mask) | config << sunxi_mux_offset(pin),
pctl->membase + sunxi_mux_reg(pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +484,21 @@
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
u32 reg = sunxi_data_reg(offset);
u8 index = sunxi_data_offset(offset);
+ unsigned long flags;
+ u32 regval;
- writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
+
+ if (value)
+ regval |= BIT(index);
+ else
+ regval &= ~(BIT(index));
+
+ writel(regval, pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -526,6 +559,8 @@
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
+ unsigned long flags;
+ u32 regval;
u8 mode;
switch (type) {
@@ -548,7 +583,13 @@
return -EINVAL;
}
- writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
+ regval &= ~IRQ_CFG_IRQ_MASK;
+ writel(regval | (mode << index), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
return 0;
}
@@ -560,14 +601,19 @@
u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
u32 status_reg = sunxi_irq_status_reg(d->hwirq);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + ctrl_reg);
writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
/* Clear the IRQ */
writel(1 << status_idx, pctl->membase + status_reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +621,16 @@
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + reg);
writel(val & ~(1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +639,7 @@
struct sunxi_desc_function *func;
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +649,13 @@
/* Change muxing to INT mode */
sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Unmask the IRQ */
val = readl(pctl->membase + reg);
writel(val | (1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -752,6 +808,8 @@
return -ENOMEM;
platform_set_drvdata(pdev, pctl);
+ spin_lock_init(&pctl->lock);
+
pctl->membase = of_iomap(node, 0);
if (!pctl->membase)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index d68047d..01c494f 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -14,6 +14,7 @@
#define __PINCTRL_SUNXI_H
#include <linux/kernel.h>
+#include <linux/spinlock.h>
#define PA_BASE 0
#define PB_BASE 32
@@ -407,6 +408,7 @@
unsigned ngroups;
int irq;
int irq_array[SUNXI_IRQ_NUMBER];
+ spinlock_t lock;
struct pinctrl_dev *pctl_dev;
};
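The pinctrl-sunxi changes above convert every register update to a read-modify-write performed under a new driver-wide spinlock (spin_lock_irqsave, since the same registers are also touched from the irq_chip callbacks), and fix the GPIO set path, which used to write value << index to the whole data register and thereby clear every other pin in the bank. A standalone sketch of the difference, with a pthread mutex standing in for the spinlock and a plain variable for the MMIO register:

        #include <pthread.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t data_reg;                 /* stands in for the MMIO data register */
        static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

        static void gpio_set_bad(unsigned index, int value)
        {
                data_reg = (uint32_t)(value & 1) << index;      /* clobbers other pins */
        }

        static void gpio_set_good(unsigned index, int value)
        {
                pthread_mutex_lock(&reg_lock);    /* spin_lock_irqsave() in the driver */
                if (value)
                        data_reg |= 1u << index;
                else
                        data_reg &= ~(1u << index);
                pthread_mutex_unlock(&reg_lock);
        }

        int main(void)
        {
                gpio_set_good(3, 1);
                gpio_set_good(5, 1);
                gpio_set_bad(7, 1);               /* pins 3 and 5 are now lost */
                printf("reg = 0x%08x\n", data_reg);
                return 0;
        }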
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f859..f911952 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@
return platform_driver_register(&olpc_ec_plat_driver);
}
-module_init(olpc_ec_init_module);
+arch_initcall(olpc_ec_init_module);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 97bb05e..d6970f4 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,7 +53,6 @@
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_BIOS_QUERY 0x9
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@
return (state & 0x4) ? 1 : 0;
}
-static int hp_wmi_enable_hotkeys(void)
-{
- int ret;
- int query = 0x6e;
-
- ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
- 0);
-
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@
err = hp_wmi_input_setup();
if (err)
return err;
-
- hp_wmi_enable_hotkeys();
}
if (bios_capable) {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 2ac045f..3a1b6bf 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2440,7 +2440,10 @@
if (pos < 0)
return pos;
- return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ pos == SPEED ? "speed" :
+ pos == STAMINA ? "stamina" :
+ pos == AUTO ? "auto" : "unknown");
}
static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4320,7 +4323,8 @@
goto err_free_resources;
}
- if (sonypi_compat_init())
+ result = sonypi_compat_init();
+ if (result)
goto err_remove_input;
/* request io port */
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 767fee2..2601953 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/of_device.h>
@@ -119,24 +120,39 @@
}
#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
-static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
{
+ int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
/*
- * The datasheet doesn't say which way round the
- * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
- * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+ * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
+ * states:
+ * | The order in which registers are updated is
+ * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
+ * | (This list is in bitfield order, from LSB to MSB, as they would
+ * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
+ * | register. For example, the Seconds register corresponds to
+ * | STALE_REGS or NEW_REGS containing 0x80.)
*/
- while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
- (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
- cpu_relax();
+ do {
+ if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
+ return 0;
+ udelay(1);
+ } while (--timeout > 0);
+ return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
}
/* Time read/write */
static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
+ int ret;
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- stmp3xxx_wait_time(rtc_data);
+ ret = stmp3xxx_wait_time(rtc_data);
+ if (ret)
+ return ret;
+
rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
return 0;
}
@@ -146,8 +162,7 @@
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
- stmp3xxx_wait_time(rtc_data);
- return 0;
+ return stmp3xxx_wait_time(rtc_data);
}
/* interrupt(s) handler */
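The rtc-stmp3xxx change above turns an unbounded busy-wait on the STALE_REGS bit into a bounded poll: delay 1us per iteration, give up after roughly the copy time quoted by the i.MX28 manual, and return -ETIME so gettime/settime can propagate the failure. A standalone sketch of the pattern (ETIMEDOUT used here in place of the kernel's -ETIME):

        #include <errno.h>
        #include <stdbool.h>
        #include <unistd.h>

        static volatile bool stale = true;        /* stands in for the STALE_REGS bit */

        static int wait_not_stale(int timeout_us)
        {
                while (timeout_us-- > 0) {
                        if (!stale)
                                return 0;
                        usleep(1);                /* udelay(1) in the driver */
                }
                return stale ? -ETIMEDOUT : 0;    /* one last check, as in the patch */
        }

        int main(void)
        {
                stale = false;                    /* pretend the hardware copy completed */
                return wait_not_stale(5000) ? 1 : 0;
        }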
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 17150a7..451bf99 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2392,6 +2392,12 @@
rc = cqr->intrc;
else
rc = -EIO;
+
+ /* kick tasklets */
+ dasd_schedule_device_bh(device);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
return rc;
}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe..c82fe65 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ else {
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
zfcp_erp_action_dismiss_lun(sdev);
+ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@
{
struct scsi_device *sdev;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@
atomic_set_mask(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host)
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host)
atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1469,11 +1476,13 @@
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host) {
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1487,16 +1496,19 @@
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ unsigned long flags;
atomic_set_mask(mask, &port->status);
if (!common_mask)
return;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_set_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
@@ -1511,6 +1523,7 @@
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+ unsigned long flags;
atomic_clear_mask(mask, &port->status);
@@ -1520,13 +1533,15 @@
if (clear_counter)
atomic_set(&port->erp_counter, 0);
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
atomic_clear_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cf..de0598e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
- spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
- spin_unlock_irq(&qdio->req_q_lock);
return 0;
}
@@ -246,9 +244,8 @@
{
long ret;
- spin_unlock_irq(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
@@ -262,7 +259,6 @@
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
- spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}
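The zfcp_qdio change above replaces the drop-lock/wait/retake-lock dance with wait_event_interruptible_lock_irq_timeout(), so req_q_lock is held whenever zfcp_qdio_sbal_check() runs and is only released while the task actually sleeps; that closes the window in which a free SBAL could appear between the check and the sleep. The same guarantee in user space comes from waiting on a condition variable with the mutex held, roughly:

        #include <pthread.h>
        #include <stdbool.h>
        #include <time.h>

        static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t q_wake = PTHREAD_COND_INITIALIZER;
        static bool sbal_free;                    /* stands in for req_q_free/QDIOUP */

        static int wait_for_sbal(int timeout_sec)
        {
                struct timespec deadline;
                int rc = 0;

                clock_gettime(CLOCK_REALTIME, &deadline);
                deadline.tv_sec += timeout_sec;

                pthread_mutex_lock(&q_lock);
                while (!sbal_free && rc == 0)     /* condition checked with the lock held */
                        rc = pthread_cond_timedwait(&q_wake, &q_lock, &deadline);
                pthread_mutex_unlock(&q_lock);
                return rc;                        /* 0 on success, ETIMEDOUT on timeout */
        }

        int main(void)
        {
                sbal_free = true;                 /* pretend a buffer was just freed */
                return wait_for_sbal(5);
        }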
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf..8906392 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
+ struct device_attribute *at,\
+ char *buf) \
+{ \
+ return sprintf(buf, _format, _value); \
+} \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
+ zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *at,\
@@ -75,6 +85,8 @@
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct device_attribute *attr,
@@ -347,6 +359,8 @@
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
+ &dev_attr_unit_access_shared.attr,
+ &dev_attr_unit_access_readonly.attr,
NULL
};
static struct attribute_group zfcp_unit_attr_group = {
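The zfcp_sysfs hunk above adds a ZFCP_DEFINE_ATTR_CONST() helper so read-only attributes with a fixed value (access_shared and access_readonly, both always 0 here) are produced by a one-line macro invocation instead of a hand-written show function each. A standalone sketch of the token-pasting technique, with hypothetical names:

        #include <stdio.h>

        #define DEFINE_CONST_SHOW(_name, _format, _value)               \
        static int show_##_name(char *buf, size_t len)                  \
        {                                                               \
                return snprintf(buf, len, _format, _value);             \
        }

        DEFINE_CONST_SHOW(access_shared, "%d\n", 0)
        DEFINE_CONST_SHOW(access_readonly, "%d\n", 0)

        int main(void)
        {
                char buf[8];
                show_access_shared(buf, sizeof(buf));
                fputs(buf, stdout);
                show_access_readonly(buf, sizeof(buf));
                fputs(buf, stdout);
                return 0;
        }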
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918..92ff027 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
- select GENERIC_CSUM
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b6d1f92..c18c681 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.22"
+#define DRV_VERSION "1.5.0.23"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5f09d18..42e15ee 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -642,19 +642,6 @@
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
skb_queue_head_init(&fnic->fip_frame_queue);
- spin_lock_irqsave(&fnic_list_lock, flags);
- if (!fnic_fip_queue) {
- fnic_fip_queue =
- create_singlethread_workqueue("fnic_fip_q");
- if (!fnic_fip_queue) {
- spin_unlock_irqrestore(&fnic_list_lock, flags);
- printk(KERN_ERR PFX "fnic FIP work queue "
- "create failed\n");
- err = -ENOMEM;
- goto err_out_free_max_pool;
- }
- }
- spin_unlock_irqrestore(&fnic_list_lock, flags);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
} else {
@@ -960,6 +947,13 @@
spin_lock_init(&fnic_list_lock);
INIT_LIST_HEAD(&fnic_list);
+ fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+ if (!fnic_fip_queue) {
+ printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_fip_workq;
+ }
+
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -978,6 +972,8 @@
err_pci_register:
fc_release_transport(fnic_fc_transport);
err_fc_transport:
+ destroy_workqueue(fnic_fip_queue);
+err_create_fip_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
kmem_cache_destroy(fnic_io_req_cache);
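The fnic change above moves creation of the FIP workqueue from probe time into fnic_init_module() and gives it its own unwind label, so a later failure (transport attach, PCI registration) destroys the workqueue while earlier failures skip it. A standalone sketch of that goto-based unwind order, with malloc/free standing in for the real create/destroy calls:

        #include <errno.h>
        #include <stdlib.h>

        static int module_init_sketch(void)
        {
                void *fip_wq, *transport, *cache;
                int err;

                fip_wq = malloc(1);                       /* create_singlethread_workqueue() */
                if (!fip_wq) { err = -ENOMEM; goto err_fip_wq; }

                transport = malloc(1);                    /* fc_attach_transport() */
                if (!transport) { err = -ENOMEM; goto err_transport; }

                cache = malloc(1);                        /* pci_register_driver() etc. */
                if (!cache) { err = -ENOMEM; goto err_register; }

                return 0;                                 /* resources stay allocated for the module */

        err_register:
                free(transport);
        err_transport:
                free(fip_wq);
        err_fip_wq:
                return err;
        }

        int main(void)
        {
                return module_init_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
        }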
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0177295..1f0ca68 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3547,11 +3547,21 @@
break;
}
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+ /* waiting for about 30 seconds before retry */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
/*
* MSI-X host index 0 is common for all adapter.
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3b1ea34..eaa808e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1031,6 +1031,9 @@
{
int i, result;
+ if (sdev->skip_vpd_pages)
+ goto fail;
+
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
if (result)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258..74b88ef 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -751,7 +751,7 @@
vscsi->affinity_hint_set = true;
} else {
- for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+ for (i = 0; i < vscsi->num_queues; i++)
virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
vscsi->affinity_hint_set = false;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 222d3e3..707966b 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -609,7 +609,7 @@
else
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
- t->len, DMA_FROM_DEVICE);
+ t->len, DMA_TO_DEVICE);
if (!t->tx_dma) {
ret = -EFAULT;
goto err_tx_map;
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5..b3b5125 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -482,7 +482,7 @@
ret = comedi_device_postconfig(dev);
if (ret < 0) {
comedi_device_detach(dev);
- module_put(dev->driver->module);
+ module_put(driv->module);
}
/* On success, the driver module count has been incremented. */
return ret;
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index dcceed2..81972fa 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1811,10 +1811,12 @@
#else
if (*zcache_comp_name != '\0') {
ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret)
+ if (!ret) {
pr_info("zcache: %s not supported\n",
zcache_comp_name);
- goto out;
+ ret = 1;
+ goto out;
+ }
}
if (!ret)
strcpy(zcache_comp_name, "lzo");
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 609dbc2..83b4ef4 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1119,11 +1119,11 @@
/* Determine if it is a Rigol or not */
data->rigol_quirk = 0;
dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
- data->usb_dev->descriptor.idVendor,
- data->usb_dev->descriptor.idProduct);
+ le16_to_cpu(data->usb_dev->descriptor.idVendor),
+ le16_to_cpu(data->usb_dev->descriptor.idProduct));
for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
- if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) &&
- (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) {
+ if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
+ (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
data->rigol_quirk = 1;
break;
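The usbtmc hunk above wraps idVendor/idProduct in le16_to_cpu() before printing and comparing them: USB descriptor fields are little-endian on the wire, so on a big-endian host the raw values never match the host-order constants in the quirk table. A standalone sketch of the conversion (Rigol's 0x1ab1 vendor ID used as the example value):

        #include <stdint.h>
        #include <stdio.h>

        static uint16_t le16_to_host(const uint8_t raw[2])
        {
                return (uint16_t)(raw[0] | (raw[1] << 8));   /* byte 0 is least significant */
        }

        int main(void)
        {
                const uint8_t wire_idVendor[2] = { 0xb1, 0x1a };    /* 0x1ab1 on the wire */
                printf("idVendor = 0x%04x\n", le16_to_host(wire_idVendor));
                return 0;
        }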
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4a8a1d6..558313d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4798,7 +4798,8 @@
hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
- if (!udev) {
+ if (!udev || !(portstatus &
+ USB_PORT_STAT_CONNECTION)) {
status = hub_port_reset(hub, i,
NULL, HUB_BH_RESET_TIME,
true);
@@ -4808,8 +4809,8 @@
usb_lock_device(udev);
status = usb_reset_device(udev);
usb_unlock_device(udev);
+ connect_change = 0;
}
- connect_change = 0;
}
if (connect_change)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a635988..5b44cd4 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@
{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* CarrolTouch 4000U */
+ { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* CarrolTouch 4500U */
+ { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Samsung Android phone modem - ID conflict with SPH-I500 */
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d033..8e3c878 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1391,21 +1391,20 @@
/* Behind the scheduling threshold? */
if (unlikely(start < next)) {
+ unsigned now2 = (now - base) & (mod - 1);
/* USB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP)
start += (next - start + period - 1) & -period;
/*
- * Not ASAP: Use the next slot in the stream. If
- * the entire URB falls before the threshold, fail.
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
- else if (start + span - period < next) {
- ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
+ else if (start + span - period < now2) {
+ ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
urb, start + base,
- span - period, next + base);
- status = -EXDEV;
- goto fail;
+ span - period, now2 + base);
}
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e2..0f1d193 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -304,6 +304,11 @@
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+
+ /* Entries for the PCI suspend/resume callbacks are special */
+ ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+ ohci_pci_hc_driver.pci_resume = ohci_resume;
+
return pci_register_driver(&ohci_pci_driver);
}
module_init(ohci_pci_init);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index df6978a..6f8c2fd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 41eb4fc..9478caa 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -27,6 +27,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index eb3c8c1..eeb2720 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -830,7 +830,7 @@
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
- udev->descriptor.idProduct, dev->serial_number,
+ le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
(dev->minor - ADU_MINOR_BASE));
exit:
dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca26628..e1859b8 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include "otg_fsm.h"
+#include "phy-fsm-usb.h"
#include <linux/usb/otg.h>
#include <linux/ioctl.h>
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b35..7f45966 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-#include "phy-otg-fsm.h"
+#include "phy-fsm-usb.h"
/* Change USB protocol when there is a protocol change */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5a97972..58c17fd 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2303,7 +2303,7 @@
if (d_details == NULL) {
dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
__func__, le16_to_cpu(serial->dev->descriptor.idProduct));
- return 1;
+ return -ENODEV;
}
/* Setup private data for serial driver */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 51da424..b013001 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@
struct list_head urblist_entry;
struct kref ref_count;
struct urb *urb;
+ struct usb_ctrlrequest *setup;
};
enum mos7715_pp_modes {
@@ -271,6 +272,7 @@
struct mos7715_parport *mos_parport = urbtrack->mos_parport;
usb_free_urb(urbtrack->urb);
+ kfree(urbtrack->setup);
kfree(urbtrack);
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
@@ -355,7 +357,6 @@
struct urbtracker *urbtrack;
int ret_val;
unsigned long flags;
- struct usb_ctrlrequest setup;
struct usb_serial *serial = mos_parport->serial;
struct usb_device *usbdev = serial->dev;
@@ -373,14 +374,20 @@
kfree(urbtrack);
return -ENOMEM;
}
- setup.bRequestType = (__u8)0x40;
- setup.bRequest = (__u8)0x0e;
- setup.wValue = get_reg_value(reg, dummy);
- setup.wIndex = get_reg_index(reg);
- setup.wLength = 0;
+ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
+ if (!urbtrack->setup) {
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+ urbtrack->setup->bRequestType = (__u8)0x40;
+ urbtrack->setup->bRequest = (__u8)0x0e;
+ urbtrack->setup->wValue = get_reg_value(reg, dummy);
+ urbtrack->setup->wIndex = get_reg_index(reg);
+ urbtrack->setup->wLength = 0;
usb_fill_control_urb(urbtrack->urb, usbdev,
usb_sndctrlpipe(usbdev, 0),
- (unsigned char *)&setup,
+ (unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
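The mos7720 change above moves the usb_ctrlrequest off the stack and into the kmalloc'd urb tracker: the control URB is submitted asynchronously, so the setup packet has to outlive the submitting function and is freed only when the tracker is released. A standalone sketch of the lifetime rule, with a worker thread standing in for the URB completion:

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct request { char setup[16]; };       /* stands in for usb_ctrlrequest */

        static void *complete(void *arg)          /* async_complete() analogue */
        {
                struct request *req = arg;
                printf("completing '%s'\n", req->setup);
                free(req);                        /* freed by the completion, as in the patch */
                return NULL;
        }

        static int submit_async(pthread_t *t, const char *what)
        {
                struct request *req = malloc(sizeof(*req));   /* NOT a stack variable */
                if (!req)
                        return -1;
                snprintf(req->setup, sizeof(req->setup), "%s", what);
                if (pthread_create(t, NULL, complete, req)) {
                        free(req);                /* submission failed: free it ourselves */
                        return -1;
                }
                return 0;                         /* on success, the completion owns req */
        }

        int main(void)
        {
                pthread_t t;
                if (submit_async(&t, "regwrite"))
                        return 1;
                pthread_join(t, NULL);
                return 0;
        }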
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d953d67..3bac469 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2193,7 +2193,7 @@
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- u16 product = serial->dev->descriptor.idProduct;
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
u8 *buf;
int device_type;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 375b5a4..5c9f9b1 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1536,14 +1536,15 @@
char buf[32];
/* try ID specific firmware first, then try generic firmware */
- sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
- dev->descriptor.idProduct);
+ sprintf(buf, "ti_usb-v%04x-p%04x.fw",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
status = request_firmware(&fw_p, buf, &dev->dev);
if (status != 0) {
buf[0] = '\0';
- if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
- switch (dev->descriptor.idProduct) {
+ if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
+ switch (le16_to_cpu(dev->descriptor.idProduct)) {
case MTS_CDMA_PRODUCT_ID:
strcpy(buf, "mts_cdma.fw");
break;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 8257d30..8536578 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@
tty_flip_buffer_push(&port->port);
} else
dev_dbg(dev, "%s: empty read urb received\n", __func__);
-
- /* Resubmit urb so we continue receiving */
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- if (err != -EPERM) {
- dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
- /* busy also in error unless we are killed */
- usb_mark_last_busy(port->serial->dev);
- }
- } else {
+ }
+ /* Resubmit urb so we continue receiving */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ if (err != -EPERM) {
+ dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
+ __func__, err);
+ /* busy also in error unless we are killed */
usb_mark_last_busy(port->serial->dev);
}
+ } else {
+ usb_mark_last_busy(port->serial->dev);
}
}
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 16968c8..d3493ca 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1226,6 +1226,12 @@
}
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
+ if (rpipe == NULL) {
+ pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
+ __func__, wa_xfer_id(xfer),
+ "Probably already aborted.\n" );
+ goto out_unlock;
+ }
/* Check the delayed list -> if there, release and complete */
spin_lock_irqsave(&wa->xfer_list_lock, flags2);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1644,8 +1650,7 @@
break;
}
usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_ABORTED
- || usb_status == WA_XFER_STATUS_NOT_FOUND)
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND)
/* taken care of already */
break;
xfer_id = xfer_result->dwTransferID;
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 3ba3771..dc09ebe 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -239,24 +239,6 @@
}
};
-static const struct fb_bitfield def_rgb666[] = {
- [RED] = {
- .offset = 16,
- .length = 6,
- },
- [GREEN] = {
- .offset = 8,
- .length = 6,
- },
- [BLUE] = {
- .offset = 0,
- .length = 6,
- },
- [TRANSP] = { /* no support for transparency */
- .length = 0,
- }
-};
-
static const struct fb_bitfield def_rgb888[] = {
[RED] = {
.offset = 16,
@@ -309,9 +291,6 @@
break;
case STMLCDIF_16BIT:
case STMLCDIF_18BIT:
- /* 24 bit to 18 bit mapping */
- rgb = def_rgb666;
- break;
case STMLCDIF_24BIT:
/* real 24 bit */
rgb = def_rgb888;
@@ -453,11 +432,6 @@
return -EINVAL;
case STMLCDIF_16BIT:
case STMLCDIF_18BIT:
- /* 24 bit to 18 bit mapping */
- ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
- * each colour component
- */
- break;
case STMLCDIF_24BIT:
/* real 24 bit */
break;
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 5338f36..1b60698 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -28,6 +28,20 @@
bool invert_polarity;
};
+static const struct omap_video_timings tvc_pal_timings = {
+ .x_res = 720,
+ .y_res = 574,
+ .pixel_clock = 13500,
+ .hsw = 64,
+ .hfp = 12,
+ .hbp = 68,
+ .vsw = 5,
+ .vfp = 5,
+ .vbp = 41,
+
+ .interlace = true,
+};
+
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -212,14 +226,14 @@
return -ENODEV;
}
- ddata->timings = omap_dss_pal_timings;
+ ddata->timings = tvc_pal_timings;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = omap_dss_pal_timings;
+ dssdev->panel.timings = tvc_pal_timings;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 090a3b7..4035e83 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -349,7 +349,7 @@
for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}
static inline void clear_evtchn(int port)
@@ -1511,8 +1511,10 @@
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
+ struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
+ int masked;
if (!VALID_EVTCHN(evtchn))
return -1;
@@ -1529,6 +1531,12 @@
bind_vcpu.vcpu = tcpu;
/*
+ * Mask the event while changing the VCPU binding to prevent
+ * it being delivered on an unexpected VCPU.
+ */
+ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
+ /*
* If this fails, it usually just indicates that we're dealing with a
* virq or IPI channel, which don't actually need to be rebound. Ignore
* it, but don't do the xenlinux-level rebind in that case.
@@ -1536,6 +1544,9 @@
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
+ if (!masked)
+ unmask_evtchn(evtchn);
+
return 0;
}
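The xen/events change above masks the event channel with sync_test_and_set_bit() before issuing EVTCHNOP_bind_vcpu and unmasks it afterwards only if it was not already masked, so delivery cannot race onto an unexpected VCPU mid-rebind and a deliberately masked channel stays masked. A standalone sketch of that remember-and-restore pattern (single-threaded, so a plain flag stands in for the atomic bit op):

        #include <stdbool.h>
        #include <stdio.h>

        static bool evtchn_masked;

        static bool test_and_set_mask(void)
        {
                bool old = evtchn_masked;
                evtchn_masked = true;             /* sync_test_and_set_bit() in the driver */
                return old;
        }

        static void rebind_to_cpu(int cpu)
        {
                bool was_masked = test_and_set_mask();

                printf("rebinding to cpu %d with delivery masked\n", cpu);
                /* the EVTCHNOP_bind_vcpu hypercall happens here */

                if (!was_masked)
                        evtchn_masked = false;    /* unmask_evtchn() */
        }

        int main(void)
        {
                rebind_to_cpu(2);
                return 0;
        }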
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb..8defc6b 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@
int block, off;
inode = iget_locked(sb, ino);
- if (IS_ERR(inode))
+ if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04..c5eae72 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- int ret = 0;
+ struct bio_vec *bvec;
+ int ret = 0, i;
- if (!bio_flagged(bio, BIO_NULL_MAPPED))
- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
- 0, bmd->is_our_pages);
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+ * don't copy into a random user address space, just free.
+ */
+ if (current->mm)
+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 0, bmd->is_our_pages);
+ else if (bmd->is_our_pages)
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+ }
bio_free_map_data(bmd);
bio_put(bio);
return ret;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index eaf1333..8bc5e8c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -36,16 +36,23 @@
u64 extent_item_pos,
struct extent_inode_elem **eie)
{
- u64 data_offset;
- u64 data_len;
+ u64 offset = 0;
struct extent_inode_elem *e;
- data_offset = btrfs_file_extent_offset(eb, fi);
- data_len = btrfs_file_extent_num_bytes(eb, fi);
+ if (!btrfs_file_extent_compression(eb, fi) &&
+ !btrfs_file_extent_encryption(eb, fi) &&
+ !btrfs_file_extent_other_encoding(eb, fi)) {
+ u64 data_offset;
+ u64 data_len;
- if (extent_item_pos < data_offset ||
- extent_item_pos >= data_offset + data_len)
- return 1;
+ data_offset = btrfs_file_extent_offset(eb, fi);
+ data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+ if (extent_item_pos < data_offset ||
+ extent_item_pos >= data_offset + data_len)
+ return 1;
+ offset = extent_item_pos - data_offset;
+ }
e = kmalloc(sizeof(*e), GFP_NOFS);
if (!e)
@@ -53,7 +60,7 @@
e->next = *eie;
e->inum = key->objectid;
- e->offset = key->offset + (extent_item_pos - data_offset);
+ e->offset = key->offset + offset;
*eie = e;
return 0;
@@ -189,7 +196,7 @@
struct extent_buffer *eb;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct extent_inode_elem *eie = NULL;
+ struct extent_inode_elem *eie = NULL, *old = NULL;
u64 disk_byte;
if (level != 0) {
@@ -223,6 +230,7 @@
if (disk_byte == wanted_disk_byte) {
eie = NULL;
+ old = NULL;
if (extent_item_pos) {
ret = check_extent_in_eb(&key, eb, fi,
*extent_item_pos,
@@ -230,18 +238,20 @@
if (ret < 0)
break;
}
- if (!ret) {
- ret = ulist_add(parents, eb->start,
- (uintptr_t)eie, GFP_NOFS);
- if (ret < 0)
- break;
- if (!extent_item_pos) {
- ret = btrfs_next_old_leaf(root, path,
- time_seq);
- continue;
- }
+ if (ret > 0)
+ goto next;
+ ret = ulist_add_merge(parents, eb->start,
+ (uintptr_t)eie,
+ (u64 *)&old, GFP_NOFS);
+ if (ret < 0)
+ break;
+ if (!ret && extent_item_pos) {
+ while (old->next)
+ old = old->next;
+ old->next = eie;
}
}
+next:
ret = btrfs_next_old_item(root, path, time_seq);
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 5bf4c39..ed50460 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1271,7 +1271,6 @@
BUG_ON(!eb_rewin);
}
- extent_buffer_get(eb_rewin);
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 583d98b..fe443fe 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4048,7 +4048,7 @@
}
while (!end) {
- u64 offset_in_extent;
+ u64 offset_in_extent = 0;
/* break if the extent we found is outside the range */
if (em->start >= max || extent_map_end(em) < off)
@@ -4064,9 +4064,12 @@
/*
* record the offset from the start of the extent
- * for adjusting the disk offset below
+ * for adjusting the disk offset below. Only do this if the
+ * extent isn't compressed since our in ram offset may be past
+ * what we have actually allocated on disk.
*/
- offset_in_extent = em_start - em->start;
+ if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+ offset_in_extent = em_start - em->start;
em_end = extent_map_end(em);
em_len = em_end - em_start;
emflags = em->flags;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a005fe2..8e686a4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -596,20 +596,29 @@
if (no_splits)
goto next;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- em->start < start) {
+ if (em->start < start) {
split->start = em->start;
split->len = start - em->start;
- split->orig_start = em->orig_start;
- split->block_start = em->block_start;
- if (compressed)
- split->block_len = em->block_len;
- else
- split->block_len = split->len;
- split->ram_bytes = em->ram_bytes;
- split->orig_block_len = max(split->block_len,
- em->orig_block_len);
+ if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ split->orig_start = em->orig_start;
+ split->block_start = em->block_start;
+
+ if (compressed)
+ split->block_len = em->block_len;
+ else
+ split->block_len = split->len;
+ split->orig_block_len = max(split->block_len,
+ em->orig_block_len);
+ split->ram_bytes = em->ram_bytes;
+ } else {
+ split->orig_start = split->start;
+ split->block_len = 0;
+ split->block_start = em->block_start;
+ split->orig_block_len = 0;
+ split->ram_bytes = split->len;
+ }
+
split->generation = gen;
split->bdev = em->bdev;
split->flags = flags;
@@ -620,8 +629,7 @@
split = split2;
split2 = NULL;
}
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- testend && em->start + em->len > start + len) {
+ if (testend && em->start + em->len > start + len) {
u64 diff = start + len - em->start;
split->start = start + len;
@@ -630,18 +638,28 @@
split->flags = flags;
split->compress_type = em->compress_type;
split->generation = gen;
- split->orig_block_len = max(em->block_len,
- em->orig_block_len);
- split->ram_bytes = em->ram_bytes;
- if (compressed) {
- split->block_len = em->block_len;
- split->block_start = em->block_start;
- split->orig_start = em->orig_start;
+ if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ split->orig_block_len = max(em->block_len,
+ em->orig_block_len);
+
+ split->ram_bytes = em->ram_bytes;
+ if (compressed) {
+ split->block_len = em->block_len;
+ split->block_start = em->block_start;
+ split->orig_start = em->orig_start;
+ } else {
+ split->block_len = split->len;
+ split->block_start = em->block_start
+ + diff;
+ split->orig_start = em->orig_start;
+ }
} else {
- split->block_len = split->len;
- split->block_start = em->block_start + diff;
- split->orig_start = em->orig_start;
+ split->ram_bytes = split->len;
+ split->orig_start = split->start;
+ split->block_len = 0;
+ split->block_start = em->block_start;
+ split->orig_block_len = 0;
}
ret = add_extent_mapping(em_tree, split, modified);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6d1b93c..021694c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2166,16 +2166,23 @@
if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
continue;
- extent_offset = btrfs_file_extent_offset(leaf, extent);
- if (key.offset - extent_offset != offset)
+ /*
+ * 'offset' refers to the exact key.offset,
+ * NOT the 'offset' field in btrfs_extent_data_ref, ie.
+ * (key.offset - extent_offset).
+ */
+ if (key.offset != offset)
continue;
+ extent_offset = btrfs_file_extent_offset(leaf, extent);
num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+
if (extent_offset >= old->extent_offset + old->offset +
old->len || extent_offset + num_bytes <=
old->extent_offset + old->offset)
continue;
+ ret = 0;
break;
}
@@ -2187,7 +2194,7 @@
backref->root_id = root_id;
backref->inum = inum;
- backref->file_pos = offset + extent_offset;
+ backref->file_pos = offset;
backref->num_bytes = num_bytes;
backref->extent_offset = extent_offset;
backref->generation = btrfs_file_extent_generation(leaf, extent);
@@ -2210,7 +2217,8 @@
new->path = path;
list_for_each_entry_safe(old, tmp, &new->head, list) {
- ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+ ret = iterate_inodes_from_logical(old->bytenr +
+ old->extent_offset, fs_info,
path, record_one_backref,
old);
BUG_ON(ret < 0 && ret != -ENOENT);
@@ -4391,9 +4399,6 @@
int mask = attr->ia_valid;
int ret;
- if (newsize == oldsize)
- return 0;
-
/*
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
* special case where we need to update the times despite not having
@@ -5165,14 +5170,31 @@
}
/* Reached end of directory/root. Bump pos past the last item. */
- if (key_type == BTRFS_DIR_INDEX_KEY)
- /*
- * 32-bit glibc will use getdents64, but then strtol -
- * so the last number we can serve is this.
- */
- ctx->pos = 0x7fffffff;
- else
- ctx->pos++;
+ ctx->pos++;
+
+ /*
+ * Stop new entries from being returned after we return the last
+ * entry.
+ *
+ * New directory entries are assigned a strictly increasing
+ * offset. This means that new entries created during readdir
+ * are *guaranteed* to be seen in the future by that readdir.
+ * This has broken buggy programs which operate on names as
+ * they're returned by readdir. Until we re-use freed offsets
+ * we have this hack to stop new entries from being returned
+ * under the assumption that they'll never reach this huge
+ * offset.
+ *
+ * This is being careful not to overflow 32bit loff_t unless the
+ * last entry requires it because doing so has broken 32bit apps
+ * in the past.
+ */
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ if (ctx->pos >= INT_MAX)
+ ctx->pos = LLONG_MAX;
+ else
+ ctx->pos = INT_MAX;
+ }
nopos:
ret = 0;
err:
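The btrfs readdir change above is exactly the clamping the new comment describes: after emitting the last entry, park f_pos at INT_MAX so 32-bit getdents users keep working, and only move to LLONG_MAX once a real offset has already reached INT_MAX. A standalone sketch of the arithmetic:

        #include <limits.h>
        #include <stdio.h>

        static long long bump_dir_pos(long long pos)
        {
                pos++;                            /* step past the last emitted entry */
                if (pos >= INT_MAX)
                        return LLONG_MAX;
                return INT_MAX;
        }

        int main(void)
        {
                printf("%lld\n", bump_dir_pos(42));            /* -> INT_MAX */
                printf("%lld\n", bump_dir_pos(INT_MAX));       /* -> LLONG_MAX */
                return 0;
        }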
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d58cce7..af1931a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -983,12 +983,12 @@
* a dirty root struct and adds it into the list of dead roots that need to
* be deleted
*/
-int btrfs_add_dead_root(struct btrfs_root *root)
+void btrfs_add_dead_root(struct btrfs_root *root)
{
spin_lock(&root->fs_info->trans_lock);
- list_add_tail(&root->root_list, &root->fs_info->dead_roots);
+ if (list_empty(&root->root_list))
+ list_add_tail(&root->root_list, &root->fs_info->dead_roots);
spin_unlock(&root->fs_info->trans_lock);
- return 0;
}
/*
@@ -1925,7 +1925,7 @@
}
root = list_first_entry(&fs_info->dead_roots,
struct btrfs_root, root_list);
- list_del(&root->root_list);
+ list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
pr_debug("btrfs: cleaner removing %llu\n",
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 005b037..defbc42 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -143,7 +143,7 @@
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_add_dead_root(struct btrfs_root *root);
+void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2c67914..ff60d89 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3746,8 +3746,9 @@
}
log_extents:
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
if (fast_search) {
- btrfs_release_path(dst_path);
ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
if (ret) {
err = ret;
@@ -3764,8 +3765,6 @@
}
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
- btrfs_release_path(path);
- btrfs_release_path(dst_path);
ret = log_directory_changes(trans, root, inode, path, dst_path);
if (ret) {
err = ret;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45e57cc..fc6f4f3 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -43,17 +43,18 @@
server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(server->secmech.md5)) {
cifs_dbg(VFS, "could not allocate crypto md5\n");
- return PTR_ERR(server->secmech.md5);
+ rc = PTR_ERR(server->secmech.md5);
+ server->secmech.md5 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
crypto_shash_descsize(server->secmech.md5);
server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdescmd5) {
- rc = -ENOMEM;
crypto_free_shash(server->secmech.md5);
server->secmech.md5 = NULL;
- return rc;
+ return -ENOMEM;
}
server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@
if (blobptr + attrsize > blobend)
break;
if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!attrsize)
+ if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
break;
if (!ses->domainName) {
ses->domainName =
@@ -591,6 +592,7 @@
static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
{
+ int rc;
unsigned int size;
/* check if already allocated */
@@ -600,7 +602,9 @@
server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
if (IS_ERR(server->secmech.hmacmd5)) {
cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
- return PTR_ERR(server->secmech.hmacmd5);
+ rc = PTR_ERR(server->secmech.hmacmd5);
+ server->secmech.hmacmd5 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bdd547..85ea98d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,18 +147,17 @@
goto out_no_root;
}
+ if (cifs_sb_master_tcon(cifs_sb)->nocase)
+ sb->s_d_op = &cifs_ci_dentry_ops;
+ else
+ sb->s_d_op = &cifs_dentry_ops;
+
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
rc = -ENOMEM;
goto out_no_root;
}
- /* do that *after* d_make_root() - we want NULL ->d_op for root here */
- if (cifs_sb_master_tcon(cifs_sb)->nocase)
- sb->s_d_op = &cifs_ci_dentry_ops;
- else
- sb->s_d_op = &cifs_dentry_ops;
-
#ifdef CONFIG_CIFS_NFSD_EXPORT
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifs_dbg(FYI, "export ops supported\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fdc370..52ca861 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
#define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
@@ -369,6 +370,9 @@
void (*generate_signingkey)(struct TCP_Server_Info *server);
int (*calc_signature)(struct smb_rqst *rqst,
struct TCP_Server_Info *server);
+ int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid);
};
struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f7e584d..b29a012 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -497,5 +497,7 @@
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
work_func_t complete);
void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid);
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fa68813..d67c550 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1675,7 +1675,8 @@
if (string == NULL)
goto out_nomem;
- if (strnlen(string, 256) == 256) {
+ if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+ == CIFS_MAX_DOMAINNAME_LEN) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@
#ifdef CONFIG_KEYS
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
/* Populate username and pw fields from keyring if possible */
static int
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1e57f36..7e36ae3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -647,6 +647,7 @@
oflags, &oplock, &cfile->fid.netfid, xid);
if (rc == 0) {
cifs_dbg(FYI, "posix reopen succeeded\n");
+ oparms.reconnect = true;
goto reopen_success;
}
/*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b83c3f5..562044f 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -305,67 +305,89 @@
}
int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid)
{
int rc;
int oplock = 0;
__u16 netfid = 0;
struct tcon_link *tlink;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *ptcon;
struct cifs_io_parms io_parms;
- u8 *buf;
- char *pbuf;
- unsigned int bytes_read = 0;
int buf_type = CIFS_NO_BUFFER;
- unsigned int link_len = 0;
FILE_ALL_INFO file_info;
- if (!CIFSCouldBeMFSymlink(fattr))
- /* it's not a symlink */
- return 0;
-
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- pTcon = tlink_tcon(tlink);
+ ptcon = tlink_tcon(tlink);
- rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+ rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
CREATE_NOT_DIR, &netfid, &oplock, &file_info,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc != 0)
- goto out;
+ if (rc != 0) {
+ cifs_put_tlink(tlink);
+ return rc;
+ }
if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
- CIFSSMBClose(xid, pTcon, netfid);
+ CIFSSMBClose(xid, ptcon, netfid);
+ cifs_put_tlink(tlink);
/* it's not a symlink */
- goto out;
+ return rc;
}
+ io_parms.netfid = netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = ptcon;
+ io_parms.offset = 0;
+ io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+
+ rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+ CIFSSMBClose(xid, ptcon, netfid);
+ cifs_put_tlink(tlink);
+ return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+ const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+ int rc = 0;
+ u8 *buf = NULL;
+ unsigned int link_len = 0;
+ unsigned int bytes_read = 0;
+ struct cifs_tcon *ptcon;
+
+ if (!CIFSCouldBeMFSymlink(fattr))
+ /* it's not a symlink */
+ return 0;
+
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
if (!buf) {
rc = -ENOMEM;
goto out;
}
- pbuf = buf;
- io_parms.netfid = netfid;
- io_parms.pid = current->tgid;
- io_parms.tcon = pTcon;
- io_parms.offset = 0;
- io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
- rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
- CIFSSMBClose(xid, pTcon, netfid);
- if (rc != 0) {
- kfree(buf);
+ ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+ if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+ rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+ &bytes_read, cifs_sb, xid);
+ else
goto out;
- }
+
+ if (rc != 0)
+ goto out;
+
+ if (bytes_read == 0) /* not a symlink */
+ goto out;
rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
- kfree(buf);
if (rc == -EINVAL) {
/* it's not a symlink */
rc = 0;
@@ -381,7 +403,7 @@
fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
fattr->cf_dtype = DT_LNK;
out:
- cifs_put_tlink(tlink);
+ kfree(buf);
return rc;
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ab87784..69d2c82 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@
return;
}
+ /*
+ * If we know that the inode will need to be revalidated immediately,
+ * then don't create a new dentry for it. We'll end up doing an on
+ * the wire call either way and this spares us an invalidation.
+ */
+ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+ return;
+
dentry = d_alloc(parent, name);
if (!dentry)
return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 79358e3..08dd37b 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -197,7 +197,7 @@
bytes_ret = 0;
} else
bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
- 256, nls_cp);
+ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null terminator */
@@ -255,8 +255,8 @@
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, 256);
- bcc_ptr += strnlen(ses->domainName, 256);
+ strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6457690..6094397 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -944,6 +944,7 @@
.mand_lock = cifs_mand_lock,
.mand_unlock_range = cifs_unlock_range,
.push_mand_locks = cifs_push_mandatory_locks,
+ .query_mf_symlink = open_query_close_cifs_symlink,
};
struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 301b191..4f2300d 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -42,6 +42,7 @@
static int
smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
{
+ int rc;
unsigned int size;
if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@
server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(server->secmech.hmacsha256)) {
cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
- return PTR_ERR(server->secmech.hmacsha256);
+ rc = PTR_ERR(server->secmech.hmacsha256);
+ server->secmech.hmacsha256 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@
server->secmech.sdeschmacsha256 = NULL;
crypto_free_shash(server->secmech.hmacsha256);
server->secmech.hmacsha256 = NULL;
- return PTR_ERR(server->secmech.cmacaes);
+ rc = PTR_ERR(server->secmech.cmacaes);
+ server->secmech.cmacaes = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb53..83cfb83 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2724,6 +2724,17 @@
return memcpy(buffer, temp, sz);
}
+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ char *end = buffer + buflen;
+ /* these dentries are never renamed, so d_lock is not needed */
+ if (prepend(&end, &buflen, " (deleted)", 11) ||
+ prepend_name(&end, &buflen, &dentry->d_name) ||
+ prepend(&end, &buflen, "/", 1))
+ end = ERR_PTR(-ENAMETOOLONG);
+ return end;
+}
+
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
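The new simple_dname() builds the name back to front: start at the end of the caller's buffer and prepend each component, so nothing needs to know the final length in advance. A small userspace sketch of the same technique (this prepend() is a stand-in for the dcache helper, not the kernel function itself):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy 'len' bytes of 'str' in front of *end, moving *end backwards. */
    static int prepend(char **end, int *buflen, const char *str, int len)
    {
        *buflen -= len;
        if (*buflen < 0)
            return -ENAMETOOLONG;
        *end -= len;
        memcpy(*end, str, len);
        return 0;
    }

    int main(void)
    {
        char buf[32];
        char *end = buf + sizeof(buf);
        int buflen = sizeof(buf);

        /* Assemble "/foo (deleted)"; the 11 includes the trailing NUL. */
        if (prepend(&end, &buflen, " (deleted)", 11) ||
            prepend(&end, &buflen, "foo", 3) ||
            prepend(&end, &buflen, "/", 1))
            return 1;
        printf("%s\n", end);    /* -> /foo (deleted) */
        return 0;
    }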
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4888cb3..c7c83ff 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -533,8 +533,7 @@
*/
void debugfs_remove_recursive(struct dentry *dentry)
{
- struct dentry *child;
- struct dentry *parent;
+ struct dentry *child, *next, *parent;
if (IS_ERR_OR_NULL(dentry))
return;
@@ -544,61 +543,37 @@
return;
parent = dentry;
+ down:
mutex_lock(&parent->d_inode->i_mutex);
+ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ if (!debugfs_positive(child))
+ continue;
- while (1) {
- /*
- * When all dentries under "parent" has been removed,
- * walk up the tree until we reach our starting point.
- */
- if (list_empty(&parent->d_subdirs)) {
- mutex_unlock(&parent->d_inode->i_mutex);
- if (parent == dentry)
- break;
- parent = parent->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
- }
- child = list_entry(parent->d_subdirs.next, struct dentry,
- d_u.d_child);
- next_sibling:
-
- /*
- * If "child" isn't empty, walk down the tree and
- * remove all its descendants first.
- */
+ /* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
mutex_unlock(&parent->d_inode->i_mutex);
parent = child;
- mutex_lock(&parent->d_inode->i_mutex);
- continue;
+ goto down;
}
- __debugfs_remove(child, parent);
- if (parent->d_subdirs.next == &child->d_u.d_child) {
- /*
- * Try the next sibling.
- */
- if (child->d_u.d_child.next != &parent->d_subdirs) {
- child = list_entry(child->d_u.d_child.next,
- struct dentry,
- d_u.d_child);
- goto next_sibling;
- }
-
- /*
- * Avoid infinite loop if we fail to remove
- * one dentry.
- */
- mutex_unlock(&parent->d_inode->i_mutex);
- break;
- }
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ up:
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
- parent = dentry->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
- __debugfs_remove(dentry, parent);
mutex_unlock(&parent->d_inode->i_mutex);
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ child = parent;
+ parent = parent->d_parent;
+ mutex_lock(&parent->d_inode->i_mutex);
+
+ if (child != dentry) {
+ next = list_entry(child->d_u.d_child.next, struct dentry,
+ d_u.d_child);
+ goto up;
+ }
+
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ mutex_unlock(&parent->d_inode->i_mutex);
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
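The rewritten debugfs_remove_recursive() trades the old sibling-juggling loop for an explicit, recursion-free depth-first walk: descend while the current child still has children, remove leaves as they are reached, then climb back to the parent and resume at the next sibling. Stripped of the VFS locking, the control flow looks roughly like the userspace sketch below (struct node and its fields are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        const char *name;
        struct node *parent;
        struct node *first_child;
        struct node *next_sibling;
    };

    static void remove_tree(struct node *root)
    {
        struct node *parent = root, *child, *next;
    down:
        for (child = parent->first_child; child; child = next) {
            next = child->next_sibling;
            if (child->first_child) {   /* not empty: descend first */
                parent = child;
                goto down;
            }
    up:
            printf("removing %s\n", child->name);
            /* in this walk the node being removed is always the list head */
            parent->first_child = next;
            free(child);
        }
        child = parent;
        parent = parent->parent;
        if (child != root) {
            next = child->next_sibling;
            goto up;                    /* subtree done: remove 'child' itself */
        }
        printf("removing %s\n", root->name);
        free(root);
    }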
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 911649a..81214911 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -686,7 +686,6 @@
device_remove_lockspace() */
sigprocmask(SIG_SETMASK, &tmpsig, NULL);
- recalc_sigpending();
return 0;
}
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb..d15ccf2 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@
struct inode *inode;
inode = iget_locked(super, ino);
- if (IS_ERR(inode))
+ if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
diff --git a/fs/exec.c b/fs/exec.c
index 9c73def..fd774c7 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@
return -ENOMEM;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@
free_pgd_range(&tlb, old_start, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
}
- tlb_finish_mmu(&tlb, new_end, old_end);
+ tlb_finish_mmu(&tlb, old_start, old_end);
/*
* Shrink the vma to just the new range. Always succeeds.
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b577e45..0ab26fb 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2086,6 +2086,7 @@
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern void ext4_truncate(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 72a3600..17ac112 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -255,10 +255,10 @@
set_buffer_prio(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
- if (err) {
- /* Errors can only happen if there is a bug */
- handle->h_err = err;
- __ext4_journal_stop(where, line, handle);
+ /* Errors can only happen if there is a bug */
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
}
} else {
if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a618738..72ba470 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4412,7 +4412,7 @@
retry:
err = ext4_es_remove_extent(inode, last_block,
EXT_MAX_BLOCKS - last_block);
- if (err == ENOMEM) {
+ if (err == -ENOMEM) {
cond_resched();
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6f4cc56..319c9d2 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -219,7 +219,6 @@
{
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_inode_info *ei = EXT4_I(inode);
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
@@ -259,22 +258,10 @@
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
*/
- if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
- struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
- spin_lock(&inode->i_lock);
- if (!ei->jinode) {
- if (!jinode) {
- spin_unlock(&inode->i_lock);
- return -ENOMEM;
- }
- ei->jinode = jinode;
- jbd2_journal_init_jbd_inode(ei->jinode, inode);
- jinode = NULL;
- }
- spin_unlock(&inode->i_lock);
- if (unlikely(jinode != NULL))
- jbd2_free_inode(jinode);
+ if (filp->f_mode & FMODE_WRITE) {
+ int ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ return ret;
}
return dquot_file_open(inode, filp);
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f03598c..8bf5999 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -734,11 +734,8 @@
ino = ext4_find_next_zero_bit((unsigned long *)
inode_bitmap_bh->b_data,
EXT4_INODES_PER_GROUP(sb), ino);
- if (ino >= EXT4_INODES_PER_GROUP(sb)) {
- if (++group == ngroups)
- group = 0;
- continue;
- }
+ if (ino >= EXT4_INODES_PER_GROUP(sb))
+ goto next_group;
if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
ext4_error(sb, "reserved inode found cleared - "
"inode=%lu", ino + 1);
@@ -769,6 +766,9 @@
goto got; /* we grabbed the inode! */
if (ino < EXT4_INODES_PER_GROUP(sb))
goto repeat_in_this_group;
+next_group:
+ if (++group == ngroups)
+ group = 0;
}
err = -ENOSPC;
goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ba33c67..c2ca04e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -555,14 +555,13 @@
int ret;
unsigned long long status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (lookup)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -656,14 +655,13 @@
int ret;
unsigned long long status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (allocation)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
/*
* If the extent has been zeroed out, we don't need to update
@@ -1637,14 +1635,13 @@
int ret;
unsigned long long status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (lookup)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -3536,6 +3533,18 @@
offset;
}
+ if (offset & (sb->s_blocksize - 1) ||
+ (offset + length) & (sb->s_blocksize - 1)) {
+ /*
+ * Attach jinode to inode for jbd2 if we do any zeroing of
+ * partial block
+ */
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ goto out_mutex;
+
+ }
+
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
@@ -3604,6 +3613,31 @@
return ret;
}
+int ext4_inode_attach_jinode(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct jbd2_inode *jinode;
+
+ if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+ return 0;
+
+ jinode = jbd2_alloc_inode(GFP_KERNEL);
+ spin_lock(&inode->i_lock);
+ if (!ei->jinode) {
+ if (!jinode) {
+ spin_unlock(&inode->i_lock);
+ return -ENOMEM;
+ }
+ ei->jinode = jinode;
+ jbd2_journal_init_jbd_inode(ei->jinode, inode);
+ jinode = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ if (unlikely(jinode != NULL))
+ jbd2_free_inode(jinode);
+ return 0;
+}
+
/*
* ext4_truncate()
*
@@ -3664,6 +3698,12 @@
return;
}
+ /* If we zero-out tail of the page, we have to create jinode for jbd2 */
+ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+ if (ext4_inode_attach_jinode(inode) < 0)
+ return;
+ }
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
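ext4_inode_attach_jinode() above is the usual "allocate optimistically, commit under the lock" idiom: the jbd2_inode is allocated without i_lock held, the protected field is re-checked under i_lock in case another opener attached one first, and the loser of the race frees its unused allocation. A generic userspace sketch of the same shape (struct obj, attach_ctx and the 64-byte allocation are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;
        void *ctx;              /* lazily attached, at most once */
    };

    static int attach_ctx(struct obj *o)
    {
        void *ctx;

        if (o->ctx)                     /* unlocked peek, re-checked below */
            return 0;

        ctx = malloc(64);               /* allocate outside the lock */

        pthread_mutex_lock(&o->lock);
        if (!o->ctx) {
            if (!ctx) {
                pthread_mutex_unlock(&o->lock);
                return -1;              /* -ENOMEM in the kernel version */
            }
            o->ctx = ctx;
            ctx = NULL;                 /* ownership handed over */
        }
        pthread_mutex_unlock(&o->lock);

        free(ctx);                      /* lost the race: drop our copy */
        return 0;
    }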
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9491ac0..c0427e2 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -77,8 +77,10 @@
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
- memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
- memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+ ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+ ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+ ext4_es_lru_del(inode1);
+ ext4_es_lru_del(inode2);
isize = i_size_read(inode1);
i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bca26f3..b59373b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1359,7 +1359,7 @@
{Opt_delalloc, EXT4_MOUNT_DELALLOC,
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
- MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+ MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_SET},
{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and delalloc");
+ "both data=journal and dioread_nolock");
goto failed_mount;
}
if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@
goto restore_opts;
}
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and delalloc");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dioread_nolock");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ }
+
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
@@ -5481,6 +5496,7 @@
kset_unregister(ext4_kset);
ext4_exit_system_zone();
ext4_exit_pageio();
+ ext4_exit_es();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6599222..65343c3 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -730,14 +730,14 @@
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH
+ __FMODE_EXEC | O_PATH | __O_TMPFILE
));
fasync_cache = kmem_cache_create("fasync_cache",
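The BUILD_BUG_ON() above cross-checks the flag definitions at compile time: HWEIGHT32() counts the distinct bits in the OR of every listed O_* flag, and that count must equal the number of flags minus one (O_RDONLY is 0 and contributes no bit). Adding __O_TMPFILE to the OR adds one bit, hence 19 becoming 20. The same trick in miniature, with made-up flag values and the compiler's popcount standing in for HWEIGHT32:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical flags, one distinct bit each, except o_rdonly == 0 */
        unsigned int o_rdonly = 0, o_wronly = 1u << 0, o_rdwr = 1u << 1,
                     o_creat = 1u << 6, o_tmpfile = 1u << 22;
        unsigned int all = o_rdonly | o_wronly | o_rdwr | o_creat | o_tmpfile;

        /* five flags listed, minus one for o_rdonly having no bit */
        assert(__builtin_popcount(all) == 5 - 1);
        printf("distinct bits: %d\n", __builtin_popcount(all));
        return 0;
    }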
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9435384..544a809 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1838,14 +1838,14 @@
glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (IS_ERR(glock_workqueue))
- return PTR_ERR(glock_workqueue);
+ if (!glock_workqueue)
+ return -ENOMEM;
gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
WQ_MEM_RECLAIM | WQ_FREEZABLE,
0);
- if (IS_ERR(gfs2_delete_workqueue)) {
+ if (!gfs2_delete_workqueue) {
destroy_workqueue(glock_workqueue);
- return PTR_ERR(gfs2_delete_workqueue);
+ return -ENOMEM;
}
register_shrinker(&glock_shrinker);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5f2e522..e2e0a90 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -47,7 +47,8 @@
* None of the buffers should be dirty, locked, or pinned.
*/
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ unsigned int nr_revokes)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+ list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+ if (nr_revokes == 0)
+ break;
bh = bd->bd_bh;
if (bh->b_state & b_state) {
if (fsync)
@@ -65,6 +68,7 @@
gfs2_ail_error(gl, bh);
}
gfs2_trans_add_revoke(sdp, bd);
+ nr_revokes--;
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@
WARN_ON_ONCE(current->journal_info);
current->journal_info = &tr;
- __gfs2_ail_flush(gl, 0);
+ __gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@
{
struct gfs2_sbd *sdp = gl->gl_sbd;
unsigned int revokes = atomic_read(&gl->gl_ail_count);
+ unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
int ret;
if (!revokes)
return;
- ret = gfs2_trans_begin(sdp, 0, revokes);
+ while (revokes > max_revokes)
+ max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+ ret = gfs2_trans_begin(sdp, 0, max_revokes);
if (ret)
return;
- __gfs2_ail_flush(gl, fsync);
+ __gfs2_ail_flush(gl, fsync, max_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bbb2715..64915ee 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -594,7 +594,7 @@
}
gfs2_glock_dq_uninit(ghs);
if (IS_ERR(d))
- return PTR_RET(d);
+ return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
@@ -1750,6 +1750,10 @@
struct gfs2_holder gh;
int ret;
+ /* For selinux during lookup */
+ if (gfs2_glock_is_locked_by_me(ip->i_gl))
+ return generic_getxattr(dentry, name, data, size);
+
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
ret = gfs2_glock_nq(&gh);
if (ret == 0) {
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e04d0e0..7b0f504 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -155,7 +155,7 @@
goto fail_wq;
gfs2_control_wq = alloc_workqueue("gfs2_control",
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
goto fail_recovery;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a3f868a..d19b30a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,14 @@
return inode;
}
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct inode *dir,
umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
+ lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+ &hugetlbfs_i_mmap_mutex_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -916,14 +926,8 @@
return h - hstates;
}
-static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = hugetlb_dname
+ .d_dname = simple_dname
};
/*
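The hugetlbfs hunk above avoids a false lockdep report by putting these inodes' i_mmap_mutex into its own lock class, since for hugetlbfs that mutex is never taken from reclaim. The general recipe is a static lock_class_key plus lockdep_set_class() right after initialisation; a minimal sketch for an imaginary driver lock (my_key and my_init_lock are invented names):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    /* one key per logical lock class; must have static storage duration */
    static struct lock_class_key my_key;

    static void my_init_lock(struct mutex *lock)
    {
        mutex_init(lock);
        /* reclassify so lockdep tracks this mutex separately from other
         * mutexes initialised at the same mutex_init() call site */
        lockdep_set_class(lock, &my_key);
    }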
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 01bfe766..41e491b 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -64,12 +64,17 @@
nlm_init->protocol, nlm_version,
nlm_init->hostname, nlm_init->noresvport,
nlm_init->net);
- if (host == NULL) {
- lockd_down(nlm_init->net);
- return ERR_PTR(-ENOLCK);
- }
+ if (host == NULL)
+ goto out_nohost;
+ if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+ goto out_nobind;
return host;
+out_nobind:
+ nlmclnt_release_host(host);
+out_nohost:
+ lockd_down(nlm_init->net);
+ return ERR_PTR(-ENOLCK);
}
EXPORT_SYMBOL_GPL(nlmclnt_init);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 9760ecb..acd3947 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -125,14 +125,15 @@
{
struct nlm_args *argp = &req->a_args;
struct nlm_lock *lock = &argp->lock;
+ char *nodename = req->a_host->h_rpcclnt->cl_nodename;
nlmclnt_next_cookie(&argp->cookie);
memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
- lock->caller = utsname()->nodename;
+ lock->caller = nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
(unsigned int)fl->fl_u.nfs_fl.owner->pid,
- utsname()->nodename);
+ nodename);
lock->svid = fl->fl_u.nfs_fl.owner->pid;
lock->fl.fl_start = fl->fl_start;
lock->fl.fl_end = fl->fl_end;
diff --git a/fs/namei.c b/fs/namei.c
index 8b61d10..89a612e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3671,15 +3671,11 @@
if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
return -EINVAL;
/*
- * To use null names we require CAP_DAC_READ_SEARCH
- * This ensures that not everyone will be able to create
- * handlink using the passed filedescriptor.
+ * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
+ * on /proc/self/fd/<fd>.
*/
- if (flags & AT_EMPTY_PATH) {
- if (!capable(CAP_DAC_READ_SEARCH))
- return -ENOENT;
+ if (flags & AT_EMPTY_PATH)
how = LOOKUP_EMPTY;
- }
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9b..a45ba4f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@
CL_COPY_ALL | CL_PRIVATE);
namespace_unlock();
if (IS_ERR(tree))
- return NULL;
+ return ERR_CAST(tree);
return &tree->mnt;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index af6e806..941246f 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -463,7 +463,6 @@
unlock_new_inode(inode);
} else
nfs_refresh_inode(inode, fattr);
- nfs_setsecurity(inode, fattr, label);
dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
@@ -963,9 +962,15 @@
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
-
+ int ret;
+
if (mapping->nrpages != 0) {
- int ret = invalidate_inode_pages2(mapping);
+ if (S_ISREG(inode->i_mode)) {
+ ret = nfs_sync_mapping(mapping);
+ if (ret < 0)
+ return ret;
+ }
+ ret = invalidate_inode_pages2(mapping);
if (ret < 0)
return ret;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cf11799..108a774 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3071,15 +3071,13 @@
nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
+ struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
- if (status < 0) {
- rpc_shutdown_client(client);
+ if (status < 0)
return ERR_PTR(status);
- }
- return client;
+ return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 71fdc0d..f6db66d 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2478,6 +2478,10 @@
if (server->flags & NFS_MOUNT_NOAC)
sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
+ if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
if (IS_ERR(s)) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0d4c410..419572f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1524,7 +1524,7 @@
static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
- 1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
+ 1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 280acef..43f4229 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1264,6 +1264,8 @@
struct svc_cred *cr = &rqstp->rq_cred;
u32 service;
+ if (!cr->cr_gss_mech)
+ return false;
service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
return service == RPC_GSS_SVC_INTEGRITY ||
service == RPC_GSS_SVC_PRIVACY;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0c0f3ea9..c2a4701 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3360,7 +3360,8 @@
8 /* eir_clientid */ +
4 /* eir_sequenceid */ +
4 /* eir_flags */ +
- 4 /* spr_how (SP4_NONE) */ +
+ 4 /* spr_how */ +
+ 8 /* spo_must_enforce, spo_must_allow */ +
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
@@ -3372,8 +3373,6 @@
WRITE32(exid->seqid);
WRITE32(exid->flags);
- /* state_protect4_r. Currently only support SP4_NONE */
- BUG_ON(exid->spa_how != SP4_NONE);
WRITE32(exid->spa_how);
switch (exid->spa_how) {
case SP4_NONE:
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913..2d8be51 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- bio_put(bio);
- /* to be detected by submit_seg_bio() */
+ /* to be detected by nilfs_segbuf_submit_bio() */
}
if (!uptodate)
@@ -377,12 +376,12 @@
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
+ segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
- segbuf->sb_nbio++;
bio_put(bio);
wi->bio = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 79736a2..2abf97b 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1757,7 +1757,7 @@
goto out;
} else if (ret == 1) {
clusters_need = wc->w_clen;
- ret = ocfs2_refcount_cow(inode, filp, di_bh,
+ ret = ocfs2_refcount_cow(inode, di_bh,
wc->w_cpos, wc->w_clen, UINT_MAX);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index eb760d8..30544ce 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2153,11 +2153,9 @@
{
int ret;
struct ocfs2_empty_dir_priv priv = {
- .ctx.actor = ocfs2_empty_dir_filldir
+ .ctx.actor = ocfs2_empty_dir_filldir,
};
- memset(&priv, 0, sizeof(priv));
-
if (ocfs2_dir_indexed(inode)) {
ret = ocfs2_empty_dir_dx(inode, &priv);
if (ret)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41000f2..3261d71 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -370,7 +370,7 @@
if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
goto out;
- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
out:
return status;
@@ -899,7 +899,7 @@
zero_clusters = last_cpos - zero_cpos;
if (needs_cow) {
- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
zero_clusters, UINT_MAX);
if (rc) {
mlog_errno(rc);
@@ -2078,7 +2078,7 @@
*meta_level = 1;
- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
if (ret)
mlog_errno(ret);
out:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 96f9ac2..0a99273 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -537,7 +537,7 @@
extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
- ocfs2_quota_trans_credits(sb) + bits_wanted;
+ ocfs2_quota_trans_credits(sb);
}
static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f1fc172..452068b 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -69,7 +69,7 @@
u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
p_cpos, new_p_cpos, len);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 9f6b96a..a70d604 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -49,7 +49,6 @@
struct ocfs2_cow_context {
struct inode *inode;
- struct file *file;
u32 cow_start;
u32 cow_len;
struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@
u32 *num_clusters,
unsigned int *extent_flags);
int (*cow_duplicate_clusters)(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
};
@@ -2922,14 +2921,12 @@
}
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0, partial;
- struct inode *inode = file_inode(file);
- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct super_block *sb = inode->i_sb;
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
struct page *page;
pgoff_t page_index;
@@ -2978,13 +2975,6 @@
if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
BUG_ON(PageDirty(page));
- if (PageReadahead(page)) {
- page_cache_async_readahead(mapping,
- &file->f_ra, file,
- page, page_index,
- readahead_pages);
- }
-
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
if (ret) {
@@ -3004,7 +2994,8 @@
}
}
- ocfs2_map_and_dirty_page(inode, handle, from, to,
+ ocfs2_map_and_dirty_page(inode,
+ handle, from, to,
page, 0, &new_block);
mark_page_accessed(page);
unlock:
@@ -3020,12 +3011,11 @@
}
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0;
- struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct ocfs2_caching_info *ci = INODE_CACHE(inode);
int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3150,7 +3140,7 @@
/*If the old clusters is unwritten, no need to duplicate. */
if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
- ret = context->cow_duplicate_clusters(handle, context->file,
+ ret = context->cow_duplicate_clusters(handle, context->inode,
cpos, old, new, len);
if (ret) {
mlog_errno(ret);
@@ -3428,35 +3418,12 @@
return ret;
}
-static void ocfs2_readahead_for_cow(struct inode *inode,
- struct file *file,
- u32 start, u32 len)
-{
- struct address_space *mapping;
- pgoff_t index;
- unsigned long num_pages;
- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
- if (!file)
- return;
-
- mapping = file->f_mapping;
- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
- if (!num_pages)
- num_pages = 1;
-
- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
- page_cache_sync_readahead(mapping, &file->f_ra, file,
- index, num_pages);
-}
-
/*
* Starting at cpos, try to CoW write_len clusters. Don't CoW
* past max_cpos. This will stop when it runs into a hole or an
* unrefcounted extent.
*/
static int ocfs2_refcount_cow_hunk(struct inode *inode,
- struct file *file,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
@@ -3485,8 +3452,6 @@
BUG_ON(cow_len == 0);
- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
if (!context) {
ret = -ENOMEM;
@@ -3508,7 +3473,6 @@
context->ref_root_bh = ref_root_bh;
context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
context->get_clusters = ocfs2_di_get_clusters;
- context->file = file;
ocfs2_init_dinode_extent_tree(&context->data_et,
INODE_CACHE(inode), di_bh);
@@ -3537,7 +3501,6 @@
* clusters between cpos and cpos+write_len are safe to modify.
*/
int ocfs2_refcount_cow(struct inode *inode,
- struct file *file,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
@@ -3557,7 +3520,7 @@
num_clusters = write_len;
if (ext_flags & OCFS2_EXT_REFCOUNTED) {
- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
num_clusters, max_cpos);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 7754608..6422bbc 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -53,7 +53,7 @@
int *credits,
int *ref_blocks);
int ocfs2_refcount_cow(struct inode *inode,
- struct file *filep, struct buffer_head *di_bh,
+ struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos);
typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@
u32 cpos, u32 write_len,
struct ocfs2_post_refcount *post);
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
int ocfs2_cow_sync_writeback(struct super_block *sb,
diff --git a/fs/open.c b/fs/open.c
index d53e298..7931f76 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -823,7 +823,7 @@
int lookup_flags = 0;
int acc_mode;
- if (flags & O_CREAT)
+ if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
op->mode = 0;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890..0ff80f9 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@
if (!dir_emit_dots(file, ctx))
goto out;
- if (!dir_emit_dots(file, ctx))
- goto out;
files = get_files_struct(p);
if (!files)
goto out;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 94441a4..737e156 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -271,7 +271,7 @@
de = next;
} while (de);
spin_unlock(&proc_subdir_lock);
- return 0;
+ return 1;
}
int proc_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 229e366..e0a790d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -205,7 +205,9 @@
static int proc_root_readdir(struct file *file, struct dir_context *ctx)
{
if (ctx->pos < FIRST_PROCESS_ENTRY) {
- proc_readdir(file, ctx);
+ int error = proc_readdir(file, ctx);
+ if (unlikely(error <= 0))
+ return error;
ctx->pos = FIRST_PROCESS_ENTRY;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6..107d026 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,16 @@
* of how soft-dirty works.
*/
pte_t ptent = *pte;
- ptent = pte_wrprotect(ptent);
- ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+ if (pte_present(ptent)) {
+ ptent = pte_wrprotect(ptent);
+ ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+ } else if (is_swap_pte(ptent)) {
+ ptent = pte_swp_clear_soft_dirty(ptent);
+ } else if (pte_file(ptent)) {
+ ptent = pte_file_clear_soft_dirty(ptent);
+ }
+
set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}
@@ -752,14 +760,15 @@
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
- if (!pte_present(ptent))
- continue;
if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
clear_soft_dirty(vma, addr, pte);
continue;
}
+ if (!pte_present(ptent))
+ continue;
+
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
@@ -859,7 +868,7 @@
} pagemap_entry_t;
struct pagemapread {
- int pos, len;
+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
pagemap_entry_t *buffer;
bool v2;
};
@@ -867,7 +876,7 @@
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)
-#define PM_ENTRY_BYTES sizeof(u64)
+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@
flags = PM_PRESENT;
page = vm_normal_page(vma, addr, pte);
} else if (is_swap_pte(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
+ swp_entry_t entry;
+ if (pte_swp_soft_dirty(pte))
+ flags2 |= __PM_SOFT_DIRTY;
+ entry = pte_to_swp_entry(pte);
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags = PM_SWAP;
@@ -1116,8 +1127,8 @@
goto out_task;
pm.v2 = soft_dirty_cleared;
- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
ret = -ENOMEM;
if (!pm.buffer)
goto out_task;
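The pagemapread change above is a units fix: pm.len now counts pagemap entries and the byte size is computed exactly once, at the kmalloc(). With the common x86-64 constants that is PAGEMAP_WALK_SIZE >> PAGE_SHIFT = 2 MiB / 4 KiB = 512 entries, i.e. a 512 * 8 = 4096-byte buffer (rough figures, assuming 4 KiB pages and 2 MiB PMDs). Keeping the element count and the element size apart avoids the double-scaling slip being repaired here:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const size_t entry_bytes = sizeof(uint64_t);    /* one pagemap entry */
        size_t len = (2u * 1024 * 1024) / 4096;         /* entries, not bytes */
        uint64_t *buf = malloc(len * entry_bytes);      /* scale exactly once */

        if (!buf)
            return 1;
        printf("%zu entries, %zu bytes\n", len, len * entry_bytes);
        free(buf);
        return 0;
    }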
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 33532f7..a958444 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -19,12 +19,13 @@
/*
* LOCKING:
*
- * We rely on new Alexander Viro's super-block locking.
+ * These guys are evicted from procfs as the very first step in ->kill_sb().
*
*/
-static int show_version(struct seq_file *m, struct super_block *sb)
+static int show_version(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
char *format;
if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
@@ -66,8 +67,9 @@
#define DJP( x ) le32_to_cpu( jp -> x )
#define JF( x ) ( r -> s_journal -> x )
-static int show_super(struct seq_file *m, struct super_block *sb)
+static int show_super(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
seq_printf(m, "state: \t%s\n"
@@ -128,8 +130,9 @@
return 0;
}
-static int show_per_level(struct seq_file *m, struct super_block *sb)
+static int show_per_level(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
int level;
@@ -186,8 +189,9 @@
return 0;
}
-static int show_bitmap(struct seq_file *m, struct super_block *sb)
+static int show_bitmap(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
seq_printf(m, "free_block: %lu\n"
@@ -218,8 +222,9 @@
return 0;
}
-static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+static int show_on_disk_super(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
struct reiserfs_super_block *rs = sb_info->s_rs;
int hash_code = DFL(s_hash_function_code);
@@ -261,8 +266,9 @@
return 0;
}
-static int show_oidmap(struct seq_file *m, struct super_block *sb)
+static int show_oidmap(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
struct reiserfs_super_block *rs = sb_info->s_rs;
unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
@@ -291,8 +297,9 @@
return 0;
}
-static int show_journal(struct seq_file *m, struct super_block *sb)
+static int show_journal(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
struct reiserfs_super_block *rs = r->s_rs;
struct journal_params *jp = &rs->s_v1.s_journal;
@@ -383,92 +390,24 @@
return 0;
}
-/* iterator */
-static int test_sb(struct super_block *sb, void *data)
-{
- return data == sb;
-}
-
-static int set_sb(struct super_block *sb, void *data)
-{
- return -ENOENT;
-}
-
-struct reiserfs_seq_private {
- struct super_block *sb;
- int (*show) (struct seq_file *, struct super_block *);
-};
-
-static void *r_start(struct seq_file *m, loff_t * pos)
-{
- struct reiserfs_seq_private *priv = m->private;
- loff_t l = *pos;
-
- if (l)
- return NULL;
-
- if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
- return NULL;
-
- up_write(&priv->sb->s_umount);
- return priv->sb;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t * pos)
-{
- ++*pos;
- if (v)
- deactivate_super(v);
- return NULL;
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
- if (v)
- deactivate_super(v);
-}
-
-static int r_show(struct seq_file *m, void *v)
-{
- struct reiserfs_seq_private *priv = m->private;
- return priv->show(m, v);
-}
-
-static const struct seq_operations r_ops = {
- .start = r_start,
- .next = r_next,
- .stop = r_stop,
- .show = r_show,
-};
-
static int r_open(struct inode *inode, struct file *file)
{
- struct reiserfs_seq_private *priv;
- int ret = seq_open_private(file, &r_ops,
- sizeof(struct reiserfs_seq_private));
-
- if (!ret) {
- struct seq_file *m = file->private_data;
- priv = m->private;
- priv->sb = proc_get_parent_data(inode);
- priv->show = PDE_DATA(inode);
- }
- return ret;
+ return single_open(file, PDE_DATA(inode),
+ proc_get_parent_data(inode));
}
static const struct file_operations r_file_operations = {
.open = r_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
- .owner = THIS_MODULE,
+ .release = single_release,
};
static struct proc_dir_entry *proc_info_root = NULL;
static const char proc_info_root_name[] = "fs/reiserfs";
static void add_file(struct super_block *sb, char *name,
- int (*func) (struct seq_file *, struct super_block *))
+ int (*func) (struct seq_file *, void *))
{
proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
&r_file_operations, func);
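The reiserfs change above drops a hand-rolled seq_file iterator (and its sget()-based superblock pinning) in favour of single_open(), the idiomatic choice when a proc file prints everything in one shot. Below is roughly the generic shape of that pattern; the converted r_open() additionally pulls the show callback itself out of PDE_DATA(). Names here (my_show, my_open, my_fops) are illustrative:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int my_show(struct seq_file *m, void *unused)
    {
        /* single_open() stashed its third argument in m->private */
        seq_printf(m, "private data at %p\n", m->private);
        return 0;
    }

    static int my_open(struct inode *inode, struct file *file)
    {
        return single_open(file, my_show, PDE_DATA(inode));
    }

    static const struct file_operations my_fops = {
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };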
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f8a23c3..e2e202a 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -499,6 +499,7 @@
static void reiserfs_kill_sb(struct super_block *s)
{
if (REISERFS_SB(s)) {
+ reiserfs_proc_info_done(s);
/*
* Force any pending inode evictions to occur now. Any
* inodes to be removed that have extended attributes
@@ -554,8 +555,6 @@
REISERFS_SB(s)->reserved_blocks);
}
- reiserfs_proc_info_done(s);
-
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
kfree(s->s_fs_info);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 56e6b68..94383a7 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -274,15 +274,12 @@
};
struct acpi_device_physical_node {
- u8 node_id;
+ unsigned int node_id;
struct list_head node;
struct device *dev;
bool put_online:1;
};
-/* set maximum of physical nodes to 32 for expansibility */
-#define ACPI_MAX_PHYSICAL_NODE 32
-
/* Device */
struct acpi_device {
int device_type;
@@ -302,10 +299,9 @@
struct acpi_driver *driver;
void *driver_data;
struct device dev;
- u8 physical_node_count;
+ unsigned int physical_node_count;
struct list_head physical_node_list;
struct mutex physical_node_lock;
- DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
struct list_head power_dependent;
void (*remove)(struct acpi_device *);
};
@@ -445,7 +441,11 @@
};
/* helper */
-acpi_handle acpi_get_child(acpi_handle, u64);
+acpi_handle acpi_find_child(acpi_handle, u64, bool);
+static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+ return acpi_find_child(handle, addr, false);
+}
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade..0807ddf 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,36 @@
{
return pmd;
}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return 0;
+}
#endif
#ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c3..5672d7e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@
#define HAVE_GENERIC_MMU_GATHER
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c..4a12532 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -336,6 +336,7 @@
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4372658..120d57a 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -78,6 +78,11 @@
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
+ cpumask_var_t started;
+
+ /* it's true when current open file is snapshot */
+ bool snapshot;
+
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@@ -90,10 +95,7 @@
loff_t pos;
long idx;
- cpumask_var_t started;
-
- /* it's true when current open file is snapshot */
- bool snapshot;
+ /* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
@@ -332,7 +334,7 @@
const char *name, int offset, int size,
int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
-extern void trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 3869c52..369cf2c 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -8,6 +8,7 @@
*/
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/atomic.h>
#ifndef _IIO_TRIGGER_H_
#define _IIO_TRIGGER_H_
@@ -61,7 +62,7 @@
struct list_head list;
struct list_head alloc_list;
- int use_count;
+ atomic_t use_count;
struct irq_chip subirq_chip;
int subirq_base;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23..79640e01 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
#include <linux/bitmap.h>
#include <linux/if.h>
+#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
-enum
-{
- IPV4_DEVCONF_FORWARDING=1,
- IPV4_DEVCONF_MC_FORWARDING,
- IPV4_DEVCONF_PROXY_ARP,
- IPV4_DEVCONF_ACCEPT_REDIRECTS,
- IPV4_DEVCONF_SECURE_REDIRECTS,
- IPV4_DEVCONF_SEND_REDIRECTS,
- IPV4_DEVCONF_SHARED_MEDIA,
- IPV4_DEVCONF_RP_FILTER,
- IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
- IPV4_DEVCONF_BOOTP_RELAY,
- IPV4_DEVCONF_LOG_MARTIANS,
- IPV4_DEVCONF_TAG,
- IPV4_DEVCONF_ARPFILTER,
- IPV4_DEVCONF_MEDIUM_ID,
- IPV4_DEVCONF_NOXFRM,
- IPV4_DEVCONF_NOPOLICY,
- IPV4_DEVCONF_FORCE_IGMP_VERSION,
- IPV4_DEVCONF_ARP_ANNOUNCE,
- IPV4_DEVCONF_ARP_IGNORE,
- IPV4_DEVCONF_PROMOTE_SECONDARIES,
- IPV4_DEVCONF_ARP_ACCEPT,
- IPV4_DEVCONF_ARP_NOTIFY,
- IPV4_DEVCONF_ACCEPT_LOCAL,
- IPV4_DEVCONF_SRC_VMARK,
- IPV4_DEVCONF_PROXY_ARP_PVLAN,
- IPV4_DEVCONF_ROUTE_LOCALNET,
- __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95b..b8b7dc7 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3bef14c..482ad2d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -629,7 +629,7 @@
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
-static inline void trace_dump_stack(void) { }
+static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 8d73fe2..db1791b 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -113,11 +113,27 @@
#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
#define CNTRLREG_TSCENB BIT(7)
+/* FIFO READ Register */
+#define FIFOREAD_DATA_MASK (0xfff << 0)
+#define FIFOREAD_CHNLID_MASK (0xf << 16)
+
+/* Sequencer Status */
+#define SEQ_STATUS BIT(5)
+
#define ADC_CLK 3000000
#define MAX_CLK_DIV 7
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
+/*
+ * ADC runs at 3 MHz, and it takes
+ * 15 cycles to latch one data output.
+ * Hence the idle time for the ADC to
+ * process one sample would be
+ * around 5 microseconds.
+ */
+#define IDLE_TIMEOUT 5 /* microsec */
+
#define TSCADC_CELLS 2
struct ti_tscadc_dev {
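The IDLE_TIMEOUT added above follows directly from the comment's numbers: one sample needs about 15 ADC clock cycles at 3 MHz, i.e. 15 / 3,000,000 s = 5 microseconds, so pollers need to wait at most that long per sample. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long adc_clk_hz = 3000000;       /* 3 MHz ADC clock */
        const unsigned long cycles_per_sample = 15;     /* cycles per latched sample */

        /* 15 / 3e6 s = 5e-6 s */
        printf("%lu us\n", cycles_per_sample * 1000000UL / adc_clk_hz);
        return 0;
    }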
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e..68029b3 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
- u8 rsvd22[4];
- __be16 max_qp_mcg;
- u8 rsvd23;
+ __be32 max_qp_mcg;
+ u8 rsvd22[3];
u8 log_max_mcg;
- u8 rsvd24;
+ u8 rsvd23;
u8 log_max_pd;
- u8 rsvd25;
+ u8 rsvd24;
u8 log_max_xrcd;
- u8 rsvd26[42];
+ u8 rsvd25[42];
__be16 log_uar_page_sz;
- u8 rsvd27[28];
+ u8 rsvd26[28];
u8 log_msx_atomic_size_qp;
- u8 rsvd28[2];
+ u8 rsvd27[2];
u8 log_msx_atomic_size_dc;
- u8 rsvd29[76];
+ u8 rsvd28[76];
};
@@ -472,9 +471,8 @@
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
- u8 rsvd1[2];
- __be16 num_pages;
- __be32 rsvd2[5];
+ __be32 num_pages;
+ __be32 rsvd1[5];
};
union ev_data {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b..8888381 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
- u16 max_qp_mcg;
+ u32 max_qp_mcg;
int min_page_sz;
};
@@ -691,7 +691,7 @@
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages);
+ s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa..faf4b7c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 75981d0..580a532 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
struct module;
struct device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d722490..078066d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(struct task_struct *p)
{
@@ -1543,6 +1545,8 @@
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
@@ -1894,6 +1898,8 @@
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ce..75f3494 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
#endif
/**
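The replacement comment above is easier to picture with the intended call shape written out: the barrier sits between an ordinary store and the spin_lock() that follows, and only has to keep that store from being reordered past a load performed inside the critical section. A hedged sketch of that shape (shared, cond, waiter_present and wake_something_up() are invented for illustration, not a real call site):

    /* publisher side */
    WRITE_ONCE(shared->cond, 1);
    smp_mb__before_spinlock();              /* the store above must not pass ... */
    spin_lock(&shared->lock);
    if (READ_ONCE(shared->waiter_present))  /* ... this load inside the section */
        wake_something_up(shared);
    spin_unlock(&shared->lock);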
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 6d87035..1821445 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -121,6 +121,7 @@
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
+#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d..8d4fa82 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@
swp_entry_t arch_entry;
BUG_ON(pte_file(pte));
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d70..84662ec 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
int __user *);
#else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+ int __user *, int);
+#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, int);
#endif
+#endif
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b6b215f..14105c2 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -23,6 +23,7 @@
struct uid_gid_map projid_map;
atomic_t count;
struct user_namespace *parent;
+ int level;
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a47..a67fc16 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@
__ret; \
})
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+
/*
* These are the old interfaces to sleep waiting for an event.
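For reference, a minimal usage sketch of the new wait_event_interruptible_lock_irq_timeout() helper (not part of the diff; my_wq, my_lock, data_ready and both functions are hypothetical). The caller holds the spinlock with interrupts disabled; the macro drops and reacquires it around the sleep, and the lock is passed by name, not by address:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static DEFINE_SPINLOCK(my_lock);
static bool data_ready;

static int wait_for_data(int timeout_jiffies)
{
	int ret;

	spin_lock_irq(&my_lock);
	ret = wait_event_interruptible_lock_irq_timeout(my_wq, data_ready,
							my_lock, timeout_jiffies);
	spin_unlock_irq(&my_lock);

	if (ret == 0)			/* timeout elapsed */
		return -ETIMEDOUT;
	if (ret == -ERESTARTSYS)	/* interrupted by a signal */
		return ret;
	return 0;			/* condition became true; ret was the jiffies left */
}

static void make_data_ready(void)
{
	spin_lock_irq(&my_lock);
	data_ready = true;
	wake_up(&my_wq);		/* waiter rechecks the condition under my_lock */
	spin_unlock_irq(&my_lock);
}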
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7343a27..47ada23 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,7 @@
#define _V4L2_CTRLS_H
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/videodev2.h>
/* forward references */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f18b919..8a358a2 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -122,7 +122,7 @@
if (rc > 0)
/* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk),
- LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+ LINUX_MIB_BUSYPOLLRXPACKETS, rc);
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@
return false;
}
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
- return false;
-}
-
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 260f83f..f667248 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -135,6 +135,8 @@
extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
__be32 mtu);
extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark);
extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf..a354db5 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -145,20 +145,6 @@
return INET_ECN_encapsulate(tos, inner);
}
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
- const struct iphdr *old_iph,
- struct dst_entry *dst)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* Use inner packet iph-id if possible. */
- if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
- iph->id = old_iph->id;
- else
- __ip_select_ident(iph, dst,
- (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct net *net, struct rtable *rt,
struct sk_buff *skb,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab6336..e5ae0c5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -683,13 +683,19 @@
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u8 linklayer;
u8 shift;
};
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
unsigned int len)
{
- return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+ len += r->overhead;
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+ return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+ return ((u64)len * r->mult) >> r->shift;
}
extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@
memset(res, 0, sizeof(*res));
res->rate = r->rate_bytes_ps;
res->overhead = r->overhead;
+ res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
#endif
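As an illustration of the new TC_LINKLAYER_ATM branch above (not part of the diff): before the mult/shift scaling, the length plus overhead is rounded up to whole ATM cells of 48 payload bytes, each costing 53 bytes on the wire. A hypothetical helper showing just that arithmetic:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>

static u64 atm_wire_bytes(unsigned int len, u16 overhead)
{
	len += overhead;
	/* e.g. len = 1000: DIV_ROUND_UP(1000, 48) = 21 cells, 21 * 53 = 1113 bytes */
	return (u64)DIV_ROUND_UP(len, 48) * 53;
}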
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 6cf06bf..2fee45b 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -133,4 +133,38 @@
__u8 reserved;
};
+/* index values for the variables in ipv4_devconf */
+enum
+{
+ IPV4_DEVCONF_FORWARDING=1,
+ IPV4_DEVCONF_MC_FORWARDING,
+ IPV4_DEVCONF_PROXY_ARP,
+ IPV4_DEVCONF_ACCEPT_REDIRECTS,
+ IPV4_DEVCONF_SECURE_REDIRECTS,
+ IPV4_DEVCONF_SEND_REDIRECTS,
+ IPV4_DEVCONF_SHARED_MEDIA,
+ IPV4_DEVCONF_RP_FILTER,
+ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
+ IPV4_DEVCONF_BOOTP_RELAY,
+ IPV4_DEVCONF_LOG_MARTIANS,
+ IPV4_DEVCONF_TAG,
+ IPV4_DEVCONF_ARPFILTER,
+ IPV4_DEVCONF_MEDIUM_ID,
+ IPV4_DEVCONF_NOXFRM,
+ IPV4_DEVCONF_NOPOLICY,
+ IPV4_DEVCONF_FORCE_IGMP_VERSION,
+ IPV4_DEVCONF_ARP_ANNOUNCE,
+ IPV4_DEVCONF_ARP_IGNORE,
+ IPV4_DEVCONF_PROMOTE_SECONDARIES,
+ IPV4_DEVCONF_ARP_ACCEPT,
+ IPV4_DEVCONF_ARP_NOTIFY,
+ IPV4_DEVCONF_ACCEPT_LOCAL,
+ IPV4_DEVCONF_SRC_VMARK,
+ IPV4_DEVCONF_PROXY_ARP_PVLAN,
+ IPV4_DEVCONF_ROUTE_LOCALNET,
+ __IPV4_DEVCONF_MAX
+};
+
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
#endif /* _UAPI_LINUX_IP_H */
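The enum above starts at 1, matching the kernel convention of indexing devconf storage with "value - 1" (see the IPV4_DEVCONF() macro in linux/inetdevice.h). A hypothetical sketch of that convention, for illustration only:

#include <linux/ip.h>	/* IPV4_DEVCONF_* after this change */

struct my_devconf {			/* hypothetical container */
	int data[IPV4_DEVCONF_MAX];	/* __IPV4_DEVCONF_MAX - 1 slots */
};

static inline int my_devconf_get(const struct my_devconf *cnf, int index)
{
	/* index is one of the 1-based IPV4_DEVCONF_* values */
	return cnf->data[index - 1];
}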
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0..09d62b92 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@
#define TC_H_ROOT (0xFFFFFFFFU)
#define TC_H_INGRESS (0xFFFFFFF1U)
+/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+ TC_LINKLAYER_ETHERNET,
+ TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
struct tc_ratespec {
unsigned char cell_log;
- unsigned char __reserved;
+ __u8 linklayer; /* lower 4 bits */
unsigned short overhead;
short cell_align;
unsigned short mpu;
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674..a1356d3 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -253,7 +253,7 @@
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
- LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */
+ LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
__LINUX_MIB_MAX
};
diff --git a/init/Kconfig b/init/Kconfig
index 247084b..fed81b5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -955,7 +955,7 @@
Memory Resource Controller Swap Extension comes with its price in
a bigger memory consumption. General purpose distribution kernels
which want to enable the feature but keep it disabled by default
- and let the user enable it by swapaccount boot command line
+ and let the user enable it by swapaccount=1 boot command line
parameter should have this option unselected.
Those who want to have the feature enabled by default should
select this option (if, for some reason, they need to disable it
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 789ec46..781845a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4335,8 +4335,10 @@
}
err = percpu_ref_init(&css->refcnt, css_release);
- if (err)
+ if (err) {
+ ss->css_free(cgrp);
goto err_free_all;
+ }
init_cgroup_css(css, ss, cgrp);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e565778..ea1966d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@
/*
* Cpusets with tasks - existing or newly being attached - can't
- * have empty cpus_allowed or mems_allowed.
+ * be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
- if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
- (cpumask_empty(trial->cpus_allowed) &&
- nodes_empty(trial->mems_allowed)))
- goto out;
+ if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+ if (!cpumask_empty(cur->cpus_allowed) &&
+ cpumask_empty(trial->cpus_allowed))
+ goto out;
+ if (!nodes_empty(cur->mems_allowed) &&
+ nodes_empty(trial->mems_allowed))
+ goto out;
+ }
ret = 0;
out:
@@ -1608,11 +1612,13 @@
{
struct cpuset *cs = cgroup_cs(cgrp);
cpuset_filetype_t type = cft->private;
- int retval = -ENODEV;
+ int retval = 0;
mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
goto out_unlock;
+ }
switch (type) {
case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb..e23bb19 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1679,6 +1679,12 @@
int __user *, parent_tidptr,
int __user *, child_tidptr,
int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+ int, stack_size,
+ int __user *, parent_tidptr,
+ int __user *, child_tidptr,
+ int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
int __user *, parent_tidptr,
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4b..a52ee7bb 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
@@ -702,7 +702,7 @@
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe285..a394297 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
+static void __pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+{
+ trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+ if (new_value != req->node.prio)
+ pm_qos_update_target(
+ pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@
struct pm_qos_request,
work);
- pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+ __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
@@ -364,12 +375,7 @@
}
cancel_delayed_work_sync(&req->work);
-
- trace_pm_qos_update_request(req->pm_qos_class, new_value);
- if (new_value != req->node.prio)
- pm_qos_update_target(
- pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ __pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
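The split above lets the timeout handler update the target directly: going through pm_qos_update_request() from inside the work function would reach the cancel_delayed_work_sync(&req->work) call shown a few lines up, i.e. the running work item would wait for itself. A minimal sketch of the same pattern with hypothetical names (not part of the diff):

#include <linux/workqueue.h>

struct my_req {				/* hypothetical, mirrors pm_qos_request */
	int value;
	struct delayed_work work;
};

/* Update only; never flushes the work item, so the handler may call it. */
static void my_update_value(struct my_req *req, int new_value)
{
	req->value = new_value;
}

static void my_timeout_fn(struct work_struct *work)
{
	struct my_req *req = container_of(to_delayed_work(work),
					  struct my_req, work);

	/*
	 * Calling a helper that starts with cancel_delayed_work_sync(&req->work)
	 * here would deadlock: the running work item would wait for itself.
	 */
	my_update_value(req, 0);
}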
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
index b51087f..276762f 100644
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -19,7 +19,8 @@
pr_err("need port name after brl=\n");
else
*((*str)++) = 0;
- }
+ } else
+ return NULL;
return *str;
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4041f57..a146ee3 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -469,7 +469,6 @@
/* Architecture-specific hardware disable .. */
ptrace_disable(child);
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- flush_ptrace_hw_breakpoint(child);
write_lock_irq(&tasklist_lock);
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb..05c39f0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
*/
inline int task_curr(const struct task_struct *p)
{
@@ -1482,7 +1484,7 @@
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
* or @state didn't match @p's state.
*/
static int
@@ -1491,7 +1493,13 @@
unsigned long flags;
int cpu, success = 0;
- smp_wmb();
+ /*
+ * If we are going to wake up a thread waiting for CONDITION we
+ * need to ensure that CONDITION=1 done by the caller can not be
+ * reordered with p->state check below. This pairs with mb() in
+ * set_current_state() the waiting thread does.
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
if (!(p->state & state))
goto out;
@@ -1577,8 +1585,9 @@
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
- * processes. Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@
* This makes sure that uptime, CFS vruntime, load
* balancing, etc... continue to move forward, even
* with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
*/
u64 scheduler_tick_max_deferment(void)
{
@@ -2394,6 +2405,12 @@
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ /*
+ * Make sure that signal_pending_state()->signal_pending() below
+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+ * done by the caller to avoid the race with signal_wake_up().
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible. The caller is accounted as waiting for IO.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -2865,8 +2882,8 @@
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -2903,8 +2920,8 @@
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
*/
@@ -3182,7 +3199,7 @@
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
* RT tasks are offset by -200. Normal tasks are centered
* around 0, value goes from -16 to +15.
*/
@@ -3194,6 +3211,8 @@
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
*/
int task_nice(const struct task_struct *p)
{
@@ -3204,6 +3223,8 @@
/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
*/
int idle_cpu(int cpu)
{
@@ -3226,6 +3247,8 @@
/**
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
*/
struct task_struct *idle_task(int cpu)
{
@@ -3235,6 +3258,8 @@
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
+ *
+ * Return: The task of @pid, if found. %NULL otherwise.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
@@ -3432,6 +3457,8 @@
* @policy: new policy.
* @param: structure containing the new RT priority.
*
+ * Return: 0 on success. An error code otherwise.
+ *
* NOTE that the task may be already dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
@@ -3485,6 +3514,8 @@
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3509,6 +3542,9 @@
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
@@ -3535,6 +3571,9 @@
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3659,6 +3698,8 @@
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
*/
SYSCALL_DEFINE0(sched_yield)
{
@@ -3869,7 +3914,7 @@
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
- * Returns:
+ * Return:
* true (>0) if we indeed boosted the target task.
* false (0) if we failed to boost the target.
* -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
@@ -3997,8 +4043,9 @@
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
@@ -4024,6 +4071,9 @@
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
*/
struct task_struct *curr_task(int cpu)
{
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e87..8b836b3 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@
* any discrepancies created by racing against the uncertainty of the current
* priority configuration.
*
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
*/
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@
* cpupri_init - initialize the cpupri structure
* @cp: The cpupri context
*
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
*/
int cpupri_init(struct cpupri *cp)
{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9565645..68f1609 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2032,6 +2032,7 @@
*/
update_entity_load_avg(curr, 1);
update_cfs_rq_blocked_load(cfs_rq, 1);
+ update_cfs_shares(cfs_rq);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -4280,6 +4281,8 @@
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
* @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
*/
static inline int get_sd_load_idx(struct sched_domain *sd,
enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
*/
static bool update_sd_pick_busiest(struct lb_env *env,
struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@
* assuming lower CPU number will be equivalent to lower a SMT thread
* number.
*
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
* this CPU. The amount of the imbalance is returned in *imbalance.
*
* @env: The load balancing environment.
@@ -4869,7 +4875,7 @@
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
*
- * Returns: - the busiest group if imbalance exists.
+ * Return: - The busiest group if imbalance exists.
* - If no imbalance and user has opted for power-savings balance,
* return the least loaded group whose CPUs can be
* put to idle by rebalancing its tasks onto our group.
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27..0b479a6 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@
BUG_ON(bits > 32);
WARN_ON(!irqs_disabled());
read_sched_clock = read;
- sched_clock_mask = (1 << bits) - 1;
+ sched_clock_mask = (1ULL << bits) - 1;
cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e77edc9..e8a1516 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,7 +182,8 @@
* Don't allow the user to think they can get
* full NO_HZ with this machine.
*/
- WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+ WARN_ONCE(have_nohz_full_mask,
+ "NO_HZ FULL will not work with unstable sched clock");
return false;
}
#endif
@@ -343,8 +344,6 @@
void __init tick_nohz_init(void)
{
- int cpu;
-
if (!have_nohz_full_mask) {
if (tick_nohz_init_all() < 0)
return;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8ce9eefc..a6d098c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2169,12 +2169,57 @@
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
{
- struct ftrace_hash *hash;
+ /*
+ * Filter_hash being empty will default to trace module.
+ * But notrace hash requires a test of individual module functions.
+ */
+ return ftrace_hash_empty(ops->filter_hash) &&
+ ftrace_hash_empty(ops->notrace_hash);
+}
- hash = ops->filter_hash;
- return ftrace_hash_empty(hash);
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return 0;
+
+ /* If ops traces all mods, we already accounted for it */
+ if (ops_traces_mod(ops))
+ return 0;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->filter_hash) &&
+ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+ return 0;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+ return 0;
+
+ return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *ops;
+ int cnt = 0;
+
+ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+ if (ops_references_rec(ops, rec))
+ cnt++;
+ }
+
+ return cnt;
}
static int ftrace_update_code(struct module *mod)
@@ -2183,6 +2228,7 @@
struct dyn_ftrace *p;
cycle_t start, stop;
unsigned long ref = 0;
+ bool test = false;
int i;
/*
@@ -2196,9 +2242,12 @@
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) {
- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
- ops_traces_mod(ops))
- ref++;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+ if (ops_traces_mod(ops))
+ ref++;
+ else
+ test = true;
+ }
}
}
@@ -2208,12 +2257,16 @@
for (pg = ftrace_new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) {
+ int cnt = ref;
+
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
p = &pg->records[i];
- p->flags = ref;
+ if (test)
+ cnt += referenced_filters(p);
+ p->flags = cnt;
/*
* Do the initial record conversion from mcount jump
@@ -2233,7 +2286,7 @@
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up && ref) {
+ if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(p, 1);
if (failed)
ftrace_bug(failed, p->ip);
@@ -3384,6 +3437,12 @@
return add_hash_entry(hash, ip);
}
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+ if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
@@ -3426,9 +3485,8 @@
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(ops);
mutex_unlock(&ftrace_lock);
@@ -3655,9 +3713,8 @@
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
- if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(iter->ops);
mutex_unlock(&ftrace_lock);
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 882ec1d..496f94d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
- if (!global_trace.trace_buffer.buffer)
+ if (!buf->buffer)
return trace_clock_local();
- ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
- ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+ ts = ring_buffer_time_stamp(buf->buffer, cpu);
+ ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts;
}
+cycle_t ftrace_now(int cpu)
+{
+ return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
/**
* tracing_is_enabled - Show if global_trace has been disabled
*
@@ -1211,7 +1216,7 @@
/* Make sure all commits have finished */
synchronize_sched();
- buf->time_start = ftrace_now(buf->cpu);
+ buf->time_start = buffer_ftrace_now(buf, buf->cpu);
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,11 +1224,6 @@
ring_buffer_record_enable(buffer);
}
-void tracing_reset_current(int cpu)
-{
- tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
@@ -4151,6 +4151,7 @@
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
+ cpumask_clear(iter->started);
iter->pos = -1;
trace_event_read_lock();
@@ -4468,7 +4469,7 @@
/* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
- tracing_off();
+ tracer_tracing_off(tr);
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
@@ -4633,12 +4634,12 @@
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
- tracing_reset_online_cpus(&global_trace.trace_buffer);
+ tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
- tracing_reset_online_cpus(&global_trace.max_buffer);
+ tracing_reset_online_cpus(&tr->max_buffer);
#endif
mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 898f868..29a7ebc 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@
mutex_unlock(&event_mutex);
}
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
- int ret;
+ if (!dir)
+ return;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
-
- ret = tracing_open_generic(inode, filp);
- if (ret < 0)
- trace_array_put(tr);
- return ret;
+ if (!--dir->nr_events) {
+ debugfs_remove_recursive(dir->entry);
+ list_del(&dir->list);
+ __put_system_dir(dir);
+ }
}
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void *event_file_data(struct file *filp)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
+ return ACCESS_ONCE(file_inode(filp)->i_private);
+}
- trace_array_put(tr);
+static void remove_event_file_dir(struct ftrace_event_file *file)
+{
+ struct dentry *dir = file->dir;
+ struct dentry *child;
- return 0;
+ if (dir) {
+ spin_lock(&dir->d_lock); /* probably unneeded */
+ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ if (child->d_inode) /* probably unneeded */
+ child->d_inode->i_private = NULL;
+ }
+ spin_unlock(&dir->d_lock);
+
+ debugfs_remove_recursive(dir);
+ }
+
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ kmem_cache_free(file_cachep, file);
}
/*
@@ -679,15 +688,25 @@
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
+ unsigned long flags;
char buf[4] = "0";
- if (file->flags & FTRACE_EVENT_FL_ENABLED &&
- !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+ if (likely(file))
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+ if (!file)
+ return -ENODEV;
+
+ if (flags & FTRACE_EVENT_FL_ENABLED &&
+ !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
strcpy(buf, "1");
- if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
- file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+ if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+ flags & FTRACE_EVENT_FL_SOFT_MODE)
strcat(buf, "*");
strcat(buf, "\n");
@@ -699,13 +718,10 @@
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
unsigned long val;
int ret;
- if (!file)
- return -EINVAL;
-
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -717,8 +733,11 @@
switch (val) {
case 0:
case 1:
+ ret = -ENODEV;
mutex_lock(&event_mutex);
- ret = ftrace_event_enable_disable(file, val);
+ file = event_file_data(filp);
+ if (likely(file))
+ ret = ftrace_event_enable_disable(file, val);
mutex_unlock(&event_mutex);
break;
@@ -825,7 +844,7 @@
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct list_head *common_head = &ftrace_common_fields;
struct list_head *head = trace_get_fields(call);
struct list_head *node = v;
@@ -857,7 +876,7 @@
static int f_show(struct seq_file *m, void *v)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct ftrace_event_field *field;
const char *array_descriptor;
@@ -910,6 +929,11 @@
void *p = (void *)FORMAT_HEADER;
loff_t l = 0;
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+ if (!event_file_data(m->private))
+ return ERR_PTR(-ENODEV);
+
while (l < *pos && p)
p = f_next(m, p, &l);
@@ -918,6 +942,7 @@
static void f_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&event_mutex);
}
static const struct seq_operations trace_format_seq_ops = {
@@ -929,7 +954,6 @@
static int trace_format_open(struct inode *inode, struct file *file)
{
- struct ftrace_event_call *call = inode->i_private;
struct seq_file *m;
int ret;
@@ -938,7 +962,7 @@
return ret;
m = file->private_data;
- m->private = call;
+ m->private = file;
return 0;
}
@@ -946,14 +970,18 @@
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ int id = (long)event_file_data(filp);
char buf[32];
int len;
if (*ppos)
return 0;
- len = sprintf(buf, "%d\n", call->event.type);
+ if (unlikely(!id))
+ return -ENODEV;
+
+ len = sprintf(buf, "%d\n", id);
+
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
@@ -961,21 +989,28 @@
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
struct trace_seq *s;
- int r;
+ int r = -ENODEV;
if (*ppos)
return 0;
s = kmalloc(sizeof(*s), GFP_KERNEL);
+
if (!s)
return -ENOMEM;
trace_seq_init(s);
- print_event_filter(call, s);
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ print_event_filter(call, s);
+ mutex_unlock(&event_mutex);
+
+ if (call)
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
kfree(s);
@@ -986,9 +1021,9 @@
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
char *buf;
- int err;
+ int err = -ENODEV;
if (cnt >= PAGE_SIZE)
return -EINVAL;
@@ -1003,7 +1038,12 @@
}
buf[cnt] = '\0';
- err = apply_event_filter(call, buf);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ err = apply_event_filter(call, buf);
+ mutex_unlock(&event_mutex);
+
free_page((unsigned long) buf);
if (err < 0)
return err;
@@ -1225,10 +1265,9 @@
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic_file,
+ .open = tracing_open_generic,
.read = event_enable_read,
.write = event_enable_write,
- .release = tracing_release_generic_file,
.llseek = default_llseek,
};
@@ -1240,7 +1279,6 @@
};
static const struct file_operations ftrace_event_id_fops = {
- .open = tracing_open_generic,
.read = event_id_read,
.llseek = default_llseek,
};
@@ -1488,8 +1526,8 @@
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
- trace_create_file("id", 0444, file->dir, call,
- id);
+ trace_create_file("id", 0444, file->dir,
+ (void *)(long)call->event.type, id);
#endif
/*
@@ -1514,33 +1552,16 @@
return 0;
}
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
- if (!dir)
- return;
-
- if (!--dir->nr_events) {
- debugfs_remove_recursive(dir->entry);
- list_del(&dir->list);
- __put_system_dir(dir);
- }
-}
-
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
struct ftrace_event_file *file;
struct trace_array *tr;
do_for_each_event_file_safe(tr, file) {
-
if (file->event_call != call)
continue;
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
-
+ remove_event_file_dir(file);
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -1692,16 +1713,53 @@
destroy_preds(call);
}
-/* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+static int probe_remove_event_call(struct ftrace_event_call *call)
{
+ struct trace_array *tr;
+ struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+ if (call->perf_refcount)
+ return -EBUSY;
+#endif
+ do_for_each_event_file(tr, file) {
+ if (file->event_call != call)
+ continue;
+ /*
+ * We can't rely on ftrace_event_enable_disable(enable => 0)
+ * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+ * TRACE_REG_UNREGISTER.
+ */
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ return -EBUSY;
+ /*
+ * The do_for_each_event_file_safe() is
+ * a double loop. After finding the call for this
+ * trace_array, we use break to jump to the next
+ * trace_array.
+ */
+ break;
+ } while_for_each_event_file();
+
+ __trace_remove_event_call(call);
+
+ return 0;
+}
+
+/* Remove an event_call */
+int trace_remove_event_call(struct ftrace_event_call *call)
+{
+ int ret;
+
mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
down_write(&trace_event_sem);
- __trace_remove_event_call(call);
+ ret = probe_remove_event_call(call);
up_write(&trace_event_sem);
mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+
+ return ret;
}
#define for_each_event(event, start, end) \
@@ -2270,12 +2328,8 @@
{
struct ftrace_event_file *file, *next;
- list_for_each_entry_safe(file, next, &tr->events, list) {
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
- }
+ list_for_each_entry_safe(file, next, &tr->events, list)
+ remove_event_file_dir(file);
}
static void
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0c7b75a..97daa8c 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@
free_page((unsigned long) buf);
}
+/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
- struct event_filter *filter;
+ struct event_filter *filter = call->filter;
- mutex_lock(&event_mutex);
- filter = call->filter;
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_puts(s, "none\n");
- mutex_unlock(&event_mutex);
}
void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1841,23 +1839,22 @@
return err;
}
+/* caller must hold event_mutex */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
struct event_filter *filter;
- int err = 0;
-
- mutex_lock(&event_mutex);
+ int err;
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(call);
filter = call->filter;
if (!filter)
- goto out_unlock;
+ return 0;
RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
- goto out_unlock;
+ return 0;
}
err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@
__free_filter(tmp);
}
}
-out_unlock:
- mutex_unlock(&event_mutex);
return err;
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3811487..243f683 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@
}
static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);
@@ -351,9 +351,12 @@
if (trace_probe_is_enabled(tp))
return -EBUSY;
+ /* Will fail if probe is being used by ftrace or perf */
+ if (unregister_probe_event(tp))
+ return -EBUSY;
+
__unregister_trace_probe(tp);
list_del(&tp->list);
- unregister_probe_event(tp);
return 0;
}
@@ -632,7 +635,9 @@
/* TODO: Use batch unregistration */
while (!list_empty(&probe_list)) {
tp = list_entry(probe_list.next, struct trace_probe, list);
- unregister_trace_probe(tp);
+ ret = unregister_trace_probe(tp);
+ if (ret)
+ goto end;
free_trace_probe(tp);
}
@@ -1247,11 +1252,15 @@
return ret;
}
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
{
+ int ret;
+
/* tp->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tp->call);
- kfree(tp->call.print_fmt);
+ ret = trace_remove_event_call(&tp->call);
+ if (!ret)
+ kfree(tp->call.print_fmt);
+ return ret;
}
/* Make a debugfs interface for controlling probe points */
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index a23d2d71..272261b 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@
(sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@
}
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
+ int ret;
+
+ ret = unregister_uprobe_event(tu);
+ if (ret)
+ return ret;
+
list_del(&tu->list);
- unregister_uprobe_event(tu);
free_trace_uprobe(tu);
+ return 0;
}
/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@
/* register as an event */
old_tp = find_probe_event(tu->call.name, tu->call.class->system);
- if (old_tp)
+ if (old_tp) {
/* delete old event */
- unregister_trace_uprobe(old_tp);
+ ret = unregister_trace_uprobe(old_tp);
+ if (ret)
+ goto end;
+ }
ret = register_uprobe_event(tu);
if (ret) {
@@ -256,6 +265,8 @@
group = UPROBE_EVENT_SYSTEM;
if (is_delete) {
+ int ret;
+
if (!event) {
pr_info("Delete command needs an event name.\n");
return -EINVAL;
@@ -269,9 +280,9 @@
return -ENOENT;
}
/* delete an event */
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
mutex_unlock(&uprobe_lock);
- return 0;
+ return ret;
}
if (argc < 2) {
@@ -408,16 +419,20 @@
return ret;
}
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
{
struct trace_uprobe *tu;
+ int ret = 0;
mutex_lock(&uprobe_lock);
while (!list_empty(&uprobe_list)) {
tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
+ if (ret)
+ break;
}
mutex_unlock(&uprobe_lock);
+ return ret;
}
/* Probes listing interfaces */
@@ -462,8 +477,13 @@
static int probes_open(struct inode *inode, struct file *file)
{
- if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
- cleanup_all_probes();
+ int ret;
+
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ ret = cleanup_all_probes();
+ if (ret)
+ return ret;
+ }
return seq_open(file, &probes_seq_op);
}
@@ -968,12 +988,17 @@
return ret;
}
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
{
+ int ret;
+
/* tu->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tu->call);
+ ret = trace_remove_event_call(&tu->call);
+ if (ret)
+ return ret;
kfree(tu->call.print_fmt);
tu->call.print_fmt = NULL;
+ return 0;
}
/* Make a trace interface for controlling probe points */
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d8c30db..9064b91 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -62,6 +62,9 @@
kgid_t group = new->egid;
int ret;
+ if (parent_ns->level > 32)
+ return -EUSERS;
+
/*
* Verify that we can not violate the policy of which files
* may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
+ ns->level = parent_ns->level + 1;
ns->owner = owner;
ns->group = group;
@@ -105,16 +109,21 @@
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
+ int err = -ENOMEM;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
- if (!cred)
- return -ENOMEM;
+ if (cred) {
+ err = create_user_ns(cred);
+ if (err)
+ put_cred(cred);
+ else
+ *new_cred = cred;
+ }
- *new_cred = cred;
- return create_user_ns(cred);
+ return err;
}
void free_user_ns(struct user_namespace *ns)
diff --git a/kernel/wait.c b/kernel/wait.c
index dec68bd..d550920 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -363,8 +363,7 @@
/**
* wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
*
* Wake up anyone waiting for the atomic_t to go to zero.
*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0b72e81..7f5d4be 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@
return false;
}
+static bool __flush_work(struct work_struct *work)
+{
+ struct wq_barrier barr;
+
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
+}
+
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2830,18 +2843,10 @@
*/
bool flush_work(struct work_struct *work)
{
- struct wq_barrier barr;
-
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
+ return __flush_work(work);
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -3411,6 +3416,12 @@
{
to->nice = from->nice;
cpumask_copy(to->cpumask, from->cpumask);
+ /*
+ * Unlike hash and equality test, this function doesn't ignore
+ * ->no_numa as it is used for both pool and wq attrs. Instead,
+ * get_unbound_pool() explicitly clears ->no_numa after copying.
+ */
+ to->no_numa = from->no_numa;
}
/* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
+ /*
+ * no_numa isn't a worker_pool attribute, always clear it. See
+ * 'struct workqueue_attrs' comments for detail.
+ */
+ pool->attrs->no_numa = false;
+
/* if cpumask is contained inside a NUMA node, we belong to that node */
if (wq_numa_enabled) {
for_each_node(node) {
@@ -4756,7 +4773,14 @@
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
+
+ /*
+ * The work item is on-stack and can't lead to deadlock through
+ * flushing. Use __flush_work() to avoid spurious lockdep warnings
+ * when work_on_cpu()s are nested.
+ */
+ __flush_work(&wfc.work);
+
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
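The "nested" case mentioned in the comment above looks roughly like the following (hypothetical functions; not part of the diff). Both on-stack work items are initialised at the same call site inside work_on_cpu() and so share a lockdep class, which made the old flush_work() path report a false positive; __flush_work() skips those annotations for this internal caller:

#include <linux/workqueue.h>

static long fn_inner(void *arg)
{
	return 0;
}

static long fn_outer(void *arg)
{
	int inner_cpu = *(int *)arg;

	/* second, nested level of work_on_cpu() */
	return work_on_cpu(inner_cpu, fn_inner, NULL);
}

static long run_nested(int outer_cpu, int inner_cpu)
{
	/* work_on_cpu() is synchronous, so passing &inner_cpu is safe */
	return work_on_cpu(outer_cpu, fn_outer, &inner_cpu);
}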
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index fd94058..28321d8 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -437,7 +437,7 @@
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4_compress);
+EXPORT_SYMBOL(lz4_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index d3414ea..411be80 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -299,7 +299,7 @@
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress);
+EXPORT_SYMBOL(lz4_decompress);
#endif
int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index eb1a74f..f344f76 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -533,7 +533,7 @@
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4hc_compress);
+EXPORT_SYMBOL(lz4hc_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da359..5bff081 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
- pte_t *pte;
+ pte_t *pte, ptfile;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
- if (!pte_none(*pte))
- zap_pte(mm, vma, addr, pte);
+ ptfile = pgoff_to_pte(pgoff);
- set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+ if (!pte_none(*pte)) {
+ if (pte_present(*pte) && pte_soft_dirty(*pte))
+ pte_file_mksoft_dirty(ptfile);
+ zap_pte(mm, vma, addr, pte);
+ }
+
+ set_pte_at(mm, addr, pte, ptfile);
/*
* We don't need to run update_mmu_cache() here because the "file pte"
* being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a..b60f330 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@
mm = vma->vm_mm;
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
tlb_finish_mmu(&tlb, start, end);
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c290a1c..0878ff7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@
if (!s->memcg_params)
return -ENOMEM;
- INIT_WORK(&s->memcg_params->destroy,
- kmem_cache_destroy_work_func);
if (memcg) {
s->memcg_params->memcg = memcg;
s->memcg_params->root_cache = root_cache;
+ INIT_WORK(&s->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
} else
s->memcg_params->is_root_cache = true;
@@ -6969,7 +6969,6 @@
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
- /* consider enabled if no parameter or 1 is given */
if (!strcmp(s, "1"))
really_do_swap_account = 1;
else if (!strcmp(s, "0"))
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a..af84bc0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@
* tear-down from @mm. The @fullmm argument is used when @mm is without
* users and we're going to destroy the full address space (exit/execve).
*/
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
- tlb->start = -1UL;
- tlb->end = 0;
+ tlb->start = start;
+ tlb->end = end;
tlb->need_flush = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
@@ -256,8 +257,6 @@
{
struct mmu_gather_batch *batch, *next;
- tlb->start = start;
- tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
- unsigned long range_start = addr;
again:
init_rss_vec(rss);
@@ -1141,9 +1139,12 @@
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
- addr) != page->index)
- set_pte_at(mm, addr, pte,
- pgoff_to_pte(page->index));
+ addr) != page->index) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(ptent))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, addr, pte, ptfile);
+ }
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
@@ -1202,17 +1203,25 @@
* and page-free while holding it.
*/
if (force_flush) {
+ unsigned long old_end;
+
force_flush = 0;
-#ifdef HAVE_GENERIC_MMU_GATHER
- tlb->start = range_start;
+ /*
+ * Flush the TLB just for the previous segment,
+ * then update the range to be the remaining
+ * TLB range.
+ */
+ old_end = tlb->end;
tlb->end = addr;
-#endif
+
tlb_flush_mmu(tlb);
- if (addr != end) {
- range_start = addr;
+
+ tlb->start = addr;
+ tlb->end = old_end;
+
+ if (addr != end)
goto again;
- }
}
return addr;
@@ -1397,7 +1406,7 @@
unsigned long end = start + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@
unsigned long end = address + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, address, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@
exclusive = 1;
}
flush_icache_page(vma, page);
+ if (pte_swp_soft_dirty(orig_pte))
+ pte = pte_mksoft_dirty(pte);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache)
do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+ pte_mksoft_dirty(entry);
if (anon) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
diff --git a/mm/mmap.c b/mm/mmap.c
index 1edbaa3..f9c97d1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@
struct mmu_gather tlb;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@
lru_add_drain();
flush_cache_mm(mm);
- tlb_gather_mmu(&tlb, mm, 1);
+ tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df..b2e29ac 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@
swp_entry_to_pte(make_hwpoison_entry(page)));
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
+ pte_t swp_pte;
if (PageSwapCache(page)) {
/*
@@ -1264,7 +1265,10 @@
BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
- set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pte, swp_pte);
BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
- if (page->index != linear_page_index(vma, address))
- set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+ if (page->index != linear_page_index(vma, address)) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(pteval))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, address, pte, ptfile);
+ }
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index 8335dbd..e43dc55 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2909,14 +2909,8 @@
/* common code */
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = shmem_dname
+ .d_dname = simple_dname
};
/**
diff --git a/mm/slub.c b/mm/slub.c
index 2b02d66..e3ba1f2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@
int pages;
int pobjects;
- if (!s->cpu_partial)
- return;
-
do {
pages = 0;
pobjects = 0;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6ee..6cf2e60 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@
}
#endif /* CONFIG_HIBERNATION */
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ /*
+	 * When the pte carries the soft dirty bit, the pte generated
+	 * from the swap entry does not have it; it is still the same
+	 * pte from a logical point of view.
+ */
+ pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+ return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+ return pte_same(pte, swp_pte);
+#endif
+}
+
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+ if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
@@ -947,7 +962,7 @@
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
- if (unlikely(pte_same(*pte, swp_pte))) {
+ if (unlikely(maybe_same_pte(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
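The maybe_same_pte() helper above boils down to accepting a pte both with and without the soft-dirty bit applied. The following standalone sketch models that comparison with a plain integer pte; the ex_* names and the bit position are assumptions for illustration, not kernel definitions.

/* A minimal userspace model of the maybe_same_pte() idea. */
#include <assert.h>
#include <stdbool.h>

#define EX_SWP_SOFT_DIRTY (1UL << 7)   /* hypothetical bit position */

typedef unsigned long ex_pte_t;

static bool ex_maybe_same_pte(ex_pte_t pte, ex_pte_t swp_pte)
{
        /* Accept the entry with or without the soft-dirty bit set. */
        return pte == swp_pte || pte == (swp_pte | EX_SWP_SOFT_DIRTY);
}

int main(void)
{
        ex_pte_t swp = 0x1234UL;

        assert(ex_maybe_same_pte(swp, swp));
        assert(ex_maybe_same_pte(swp | EX_SWP_SOFT_DIRTY, swp));
        assert(!ex_maybe_same_pte(swp + 1, swp));
        return 0;
}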
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4d..6ee48aa 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- return vlan_dev_priv(dev)->real_dev;
+ struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+ while (is_vlan_dev(ret))
+ ret = vlan_dev_priv(ret)->real_dev;
+
+ return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
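The vlan_dev_real_dev() change walks down through stacked VLAN devices until it reaches a device that is not itself a VLAN. A minimal userspace model of that loop, with an invented ex_netdev type standing in for struct net_device:

/* Sketch of the "walk down through stacked VLANs" loop. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ex_netdev {
        const char *name;
        bool is_vlan;
        struct ex_netdev *lower;        /* underlying device for a VLAN */
};

static struct ex_netdev *ex_real_dev(struct ex_netdev *dev)
{
        struct ex_netdev *ret = dev->lower ? dev->lower : dev;

        while (ret->is_vlan && ret->lower)
                ret = ret->lower;

        return ret;
}

int main(void)
{
        struct ex_netdev eth0  = { "eth0",         false, NULL   };
        struct ex_netdev vlan1 = { "eth0.100",     true,  &eth0  };
        struct ex_netdev vlan2 = { "eth0.100.200", true,  &vlan1 };

        printf("%s -> %s\n", vlan2.name, ex_real_dev(&vlan2)->name);
        return 0;
}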
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f..264de88 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
+ *
+ * This call might reallocate skb data.
*/
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219..7614af3 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@
return 0;
}
+/* this call might reallocate skb data */
static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
int ret = false;
@@ -568,6 +569,7 @@
return ret;
}
+/* this call might reallocate skb data */
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return false;
+
+ /* skb->data might have been reallocated by pskb_may_pull() */
+ ethhdr = (struct ethhdr *)skb->data;
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
@@ -634,12 +642,14 @@
return true;
}
+/* this call might reallocate skb data */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr)
+ struct sk_buff *skb)
{
struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
struct batadv_gw_node *curr_gw = NULL;
+ struct ethhdr *ethhdr;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@
if (!ret)
goto out;
+ ethhdr = (struct ethhdr *)skb->data;
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest);
if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902d..1037d75 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@
void batadv_gw_node_purge(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b4..0f04e1c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@
if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
+ /* skb->data might have been reallocated by batadv_bla_tx() */
+ ethhdr = (struct ethhdr *)skb->data;
+
/* Register the client MAC in the transtable */
if (!is_multicast_ether_addr(ethhdr->h_source))
batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@
default:
break;
}
+
+ /* reminder: ethhdr might have become unusable from here on
+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+ */
}
/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@
/* unicast packet */
} else {
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+ ret = batadv_gw_out_of_range(bat_priv, skb);
if (ret)
goto dropped;
}
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4..857e1b8 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@
* @orig_node: the destination node
* @packet_subtype: the batman 4addr packet subtype to use
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@@ -401,7 +405,7 @@
struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = NET_RX_DROP;
- unsigned int dev_mtu;
+ unsigned int dev_mtu, header_len;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +432,17 @@
switch (packet_type) {
case BATADV_UNICAST:
- batadv_unicast_prepare_skb(skb, orig_node);
+ if (!batadv_unicast_prepare_skb(skb, orig_node))
+ goto out;
+
+ header_len = sizeof(struct batadv_unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
- batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
- packet_subtype);
+ if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+ packet_subtype))
+ goto out;
+
+ header_len = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +451,7 @@
goto out;
}
+ ethhdr = (struct ethhdr *)(skb->data + header_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca91..ffd5874 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@
if (!pv)
return;
- for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
fdb_delete(br, f);
@@ -730,7 +730,7 @@
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
goto out;
}
@@ -739,7 +739,7 @@
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
if (err)
goto out;
@@ -817,7 +817,7 @@
err = __br_fdb_delete(p, addr, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_delete(p, addr, 0);
goto out;
}
@@ -827,7 +827,7 @@
* vlan on this port.
*/
err = -ENOENT;
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err &= __br_fdb_delete(p, addr, vid);
}
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 61c5e81..08e576a 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1195,7 +1195,7 @@
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
- } else if (skb->len >= sizeof(*mld2q)) {
+ } else {
if (!pskb_may_pull(skb, sizeof(*mld2q))) {
err = -EINVAL;
goto out;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30ab..b9259ef 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@
else
pv = br_get_vlan_info(br);
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
goto done;
af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@
goto nla_put_failure;
pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
vinfo.vid = vid;
vinfo.flags = 0;
if (vid == pvid)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96..3b9637f 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
/*
- * Sysfs attributes of bridge ports
+ * Sysfs attributes of bridge
* Linux ethernet bridge
*
* Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45..9a9ffe7 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@
clear_bit(vid, v->vlan_bitmap);
v->num_vlans--;
- if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
@@ -122,7 +122,7 @@
{
smp_wmb();
v->pvid = 0;
- bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+ bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068..b84a1b1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@
nhoff += sizeof(struct ipv6hdr);
break;
}
+ case __constant_htons(ETH_P_8021AD):
case __constant_htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9232c68..60533db 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@
atomic_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
-
- if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
- kfree(p);
- return NULL;
- }
-
dev_hold(dev);
p->dev = dev;
write_pnet(&p->net, hold_net(net));
p->sysctl_table = NULL;
+
+ if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ release_net(net);
+ dev_put(dev);
+ kfree(p);
+ return NULL;
+ }
+
write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de7408..ca198c1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@
/* If aging addresses are supported device will need to
* implement its own handler for this.
*/
- if (ndm->ndm_state & NUD_PERMANENT) {
+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n", dev->name);
return -EINVAL;
}
@@ -2384,7 +2384,7 @@
struct nlattr *extfilt;
u32 filter_mask = 0;
- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
IFLA_EXT_MASK);
if (extfilt)
filter_mask = nla_get_u32(extfilt);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814b..109ee89 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@
}
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 108a1e9c..3df6d3e 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -1761,10 +1760,8 @@
if (!c)
continue;
- if (IS_LEAF(c)) {
- prefetch(rcu_dereference_rtnl(p->child[idx]));
+ if (IS_LEAF(c))
return (struct leaf *) c;
- }
/* Rescan start scanning in new node */
p = (struct tnode *) c;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab6..8d6939e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@
if (daddr)
memcpy(&iph->daddr, daddr, 4);
if (iph->daddr)
- return t->hlen;
+ return t->hlen + sizeof(*iph);
return -(t->hlen + sizeof(*iph));
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08..850525b 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@
iph->daddr = dst;
iph->saddr = src;
iph->ttl = ttl;
- tunnel_ip_select_ident(skb,
- (const struct iphdr *)skb_inner_network_header(skb),
- &rt->dst);
+ __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
err = ip_local_out(skb);
if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a11..463bd12 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,7 +273,7 @@
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
- SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+ SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223..b2f6c74 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1121,6 +1121,13 @@
goto wait_for_memory;
/*
+ * All packets are restored as if they have
+ * already been sent.
+ */
+ if (tp->repair)
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
+ /*
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f4..b6ae92a 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
- u64 offs;
- u32 delta, t, bic_target, max_cnt;
+ u32 delta, bic_target, max_cnt;
+ u64 offs, t;
ca->ack_cnt++; /* count the number of ACKs */
@@ -250,9 +250,11 @@
* if the cwnd < 1 million packets !!!
*/
+ t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
- - ca->epoch_start) << BICTCP_HZ) / HZ;
+ t <<= BICTCP_HZ;
+ do_div(t, HZ);
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
@@ -414,7 +416,7 @@
return;
/* Discard delay samples right after fast recovery */
- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;
delay = (rtt_us << 3) / USEC_PER_MSEC;
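The bictcp_update() hunk widens the elapsed-time arithmetic to 64 bits before the BICTCP_HZ shift so the scaling cannot overflow, and do_div() then converts it to BIC time units. A small sketch of that pattern; HZ and BICTCP_HZ are assumed to match the kernel defaults and plain 64-bit division stands in for do_div():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_HZ        1000
#define EX_BICTCP_HZ 10         /* BIC time unit: 2^10 slots per second */

static uint64_t ex_bictcp_time(uint32_t now, uint32_t epoch_start,
                               uint32_t delay_min_jiffies)
{
        uint64_t t = now - epoch_start;         /* wrap-safe u32 difference */

        t += delay_min_jiffies;
        t <<= EX_BICTCP_HZ;     /* would overflow a 32-bit t for large values */
        return t / EX_HZ;       /* plain division standing in for do_div() */
}

int main(void)
{
        /* Roughly 48 days of jiffies at HZ=1000: a 32-bit shift would wrap. */
        uint32_t elapsed = 4200000000u;

        printf("t = %" PRIu64 "\n", ex_bictcp_time(elapsed, 0, 20));
        return 0;
}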
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index da4241c..498ea99 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1126,12 +1126,10 @@
if (ifp->flags & IFA_F_OPTIMISTIC)
addr_flags |= IFA_F_OPTIMISTIC;
- ift = !max_addresses ||
- ipv6_count_addresses(idev) < max_addresses ?
- ipv6_add_addr(idev, &addr, NULL, tmp_plen,
- ipv6_addr_scope(&addr), addr_flags,
- tmp_valid_lft, tmp_prefered_lft) : NULL;
- if (IS_ERR_OR_NULL(ift)) {
+ ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+ ipv6_addr_scope(&addr), addr_flags,
+ tmp_valid_lft, tmp_prefered_lft);
+ if (IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
pr_info("%s: retry temporary address regeneration\n", __func__);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72..aeac0dc 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@
net_adj = 0;
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bff3d82..c4ff5bb 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
if (fn->fn_flags & RTN_ROOT)
break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 79aa965..04d31c2 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1369,8 +1369,10 @@
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
return;
- if (!ndopts.nd_opts_rh)
+ if (!ndopts.nd_opts_rh) {
+ ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
return;
+ }
hdr = (u8 *)ndopts.nd_opts_rh;
hdr += 8;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4..1aeb473 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@
ipv6_hdr(head)->payload_len = htons(payload_len);
ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
IP6CB(head)->nhoff = nhoff;
+ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@
struct net *net = dev_net(skb_dst(skb)->dev);
int evicted;
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
return 1;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b70f8979..8d9a93e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1178,6 +1178,27 @@
}
EXPORT_SYMBOL_GPL(ip6_redirect);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = msg->dest;
+ fl6.saddr = iph->daddr;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ rt6_do_redirect(dst, NULL, skb);
+ dst_release(dst);
+}
+
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968..cc9e02d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
#include "led.h"
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_ASSOC_MAX_TRIES 3
@@ -209,8 +211,9 @@
struct ieee80211_channel *channel,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
- struct cfg80211_chan_def *chandef, bool verbose)
+ struct cfg80211_chan_def *chandef, bool tracking)
{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_chan_def vht_chandef;
u32 ht_cfreq, ret;
@@ -229,7 +232,7 @@
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (channel->center_freq != ht_cfreq) {
+ if (!tracking && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@
* since we look at probe response/beacon data here
* it should be OK.
*/
- if (verbose)
- sdata_info(sdata,
- "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
- channel->center_freq, ht_cfreq,
- ht_oper->primary_chan, channel->band);
+ sdata_info(sdata,
+ "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+ channel->center_freq, ht_cfreq,
+ ht_oper->primary_chan, channel->band);
ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -295,7 +297,7 @@
channel->band);
break;
default:
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT operation IE has invalid channel width (%d), disable VHT\n",
vht_oper->chan_width);
@@ -304,7 +306,7 @@
}
if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@
}
if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information doesn't match HT, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@
if (ret & IEEE80211_STA_DISABLE_VHT)
vht_chandef = *chandef;
+ /*
+ * Ignore the DISABLED flag when we're already connected and only
+	 * tracking the AP's beacon for bandwidth changes - otherwise we
+ * might get disconnected here if we connect to an AP, update our
+ * regulatory information based on the AP's country IE and the
+ * information we have is wrong/outdated and disables the channel
+ * that we're actually using for the connection to the AP.
+ */
while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ tracking ? 0 :
+ IEEE80211_CHAN_DISABLED)) {
if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
ret = IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_VHT;
- goto out;
+ break;
}
ret |= chandef_downgrade(chandef);
}
- if (chandef->width != vht_chandef.width && verbose)
+ if (chandef->width != vht_chandef.width && !tracking)
sdata_info(sdata,
"capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
@@ -384,7 +395,7 @@
/* calculate new channel (type) based on HT/VHT operation IEs */
flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
- vht_oper, &chandef, false);
+ vht_oper, &chandef, true);
/*
* Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@
if (tx_flags == 0) {
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- ifmgd->auth_data->timeout_started = true;
+ auth_data->timeout_started = true;
run_again(sdata, auth_data->timeout);
} else {
- auth_data->timeout_started = false;
+ auth_data->timeout =
+ round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+ auth_data->timeout_started = true;
+ run_again(sdata, auth_data->timeout);
}
return 0;
@@ -3434,7 +3448,11 @@
assoc_data->timeout_started = true;
run_again(sdata, assoc_data->timeout);
} else {
- assoc_data->timeout_started = false;
+ assoc_data->timeout =
+ round_jiffies_up(jiffies +
+ IEEE80211_ASSOC_TIMEOUT_LONG);
+ assoc_data->timeout_started = true;
+ run_again(sdata, assoc_data->timeout);
}
return 0;
@@ -3829,7 +3847,7 @@
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_oper, vht_oper,
- &chandef, true);
+ &chandef, false);
sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
local->rx_chains);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376..2f80107 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
s16 receiver_offset;
- bool res;
+ bool res, in_recv_win;
/*
* Get the required data from the packet.
@@ -649,14 +649,18 @@
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
receiver->td_scale);
+ /* Is the ending sequence in the receive window (if available)? */
+ in_recv_win = !receiver->td_maxwin ||
+ after(end, sender->td_end - receiver->td_maxwin - 1);
+
pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
before(seq, sender->td_maxend + 1),
- after(end, sender->td_end - receiver->td_maxwin - 1),
+ (in_recv_win ? 1 : 0),
before(sack, receiver->td_end + 1),
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
if (before(seq, sender->td_maxend + 1) &&
- after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ in_recv_win &&
before(sack, receiver->td_end + 1) &&
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
/*
@@ -725,7 +729,7 @@
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: %s ",
before(seq, sender->td_maxend + 1) ?
- after(end, sender->td_end - receiver->td_maxwin - 1) ?
+ in_recv_win ?
before(sack, receiver->td_end + 1) ?
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e979..d92cc31 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(inst->group_num);
+ memset(&pmsg, 0, sizeof(pmsg));
pmsg.hw_protocol = skb->protocol;
pmsg.hook = hooknum;
@@ -498,7 +499,10 @@
if (indev && skb->dev &&
skb->mac_header != skb->network_header) {
struct nfulnl_msg_packet_hw phw;
- int len = dev_parse_header(skb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea14..8a703c3 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
- int len = dev_parse_header(entskb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
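Both nfnetlink hunks zero the on-stack hw-address struct before filling it, because nla_put() copies the entire struct, including unwritten and padding bytes, into the message that reaches userspace. A userspace sketch of why the memset matters; the ex_packet_hw layout is an assumption for illustration:

#include <stdio.h>
#include <string.h>

struct ex_packet_hw {
        unsigned short hw_addrlen;
        unsigned char  hw_addr[8];      /* often only 6 bytes get filled */
};

int main(void)
{
        struct ex_packet_hw phw;

        /* Without this, the unwritten tail of hw_addr (and any struct
         * padding) would keep stale stack bytes and be copied out below,
         * just as the kernel copies the whole struct via nla_put().
         */
        memset(&phw, 0, sizeof(phw));

        phw.hw_addrlen = 6;
        memcpy(phw.hw_addr, "\x00\x11\x22\x33\x44\x55", 6);

        for (size_t i = 0; i < sizeof(phw); i++)
                printf("%02x ", ((const unsigned char *)&phw)[i]);
        printf("\n");
        return 0;
}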
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71..6113cc7 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@
{
const struct xt_tcpmss_info *info = par->targinfo;
struct tcphdr *tcph;
- unsigned int tcplen, i;
+ int len, tcp_hdrlen;
+ unsigned int i;
__be16 oldval;
u16 newmss;
u8 *opt;
@@ -64,11 +65,14 @@
if (!skb_make_writable(skb, skb->len))
return -1;
- tcplen = skb->len - tcphoff;
- tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+ len = skb->len - tcphoff;
+ if (len < (int)sizeof(struct tcphdr))
+ return -1;
- /* Header cannot be larger than the packet */
- if (tcplen < tcph->doff*4)
+ tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+ tcp_hdrlen = tcph->doff * 4;
+
+ if (len < tcp_hdrlen)
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@
newmss = info->mss;
opt = (u_int8_t *)tcph;
- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
- opt[i+1] == TCPOLEN_MSS) {
+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
u_int16_t oldmss;
oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@
}
/* There is data after the header so the option can't be added
- without moving it, and doing so may make the SYN packet
- itself too large. Accept the packet unmodified instead. */
- if (tcplen > tcph->doff*4)
+ * without moving it, and doing so may make the SYN packet
+ * itself too large. Accept the packet unmodified instead.
+ */
+ if (len > tcp_hdrlen)
return 0;
/*
@@ -143,10 +147,10 @@
newmss = min(newmss, (u16)1220);
opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
inet_proto_csum_replace2(&tcph->check, skb,
- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+ htons(len), htons(len + TCPOLEN_MSS), 1);
opt[0] = TCPOPT_MSS;
opt[1] = TCPOLEN_MSS;
opt[2] = (newmss & 0xff00) >> 8;
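The xt_TCPMSS rewrite replaces the single "header larger than packet" test with explicit bounds: the packet must hold at least a minimal TCP header, then the full doff-sized header, and the option walk stops while a complete 4-byte MSS option still fits. A self-contained sketch of that parsing pattern over a synthetic SYN; the constants mirror TCP's but the ex_* helpers are invented:

#include <stdint.h>
#include <stdio.h>

#define EX_TCPOPT_NOP  1
#define EX_TCPOPT_MSS  2
#define EX_TCPOLEN_MSS 4
#define EX_MIN_TCPHDR  20

static int ex_find_mss(const uint8_t *pkt, int len, uint16_t *mss)
{
        int tcp_hdrlen, i, step;

        if (len < EX_MIN_TCPHDR)                /* room for a minimal header? */
                return -1;
        tcp_hdrlen = (pkt[12] >> 4) * 4;        /* TCP data-offset field */
        if (len < tcp_hdrlen)                   /* full header present? */
                return -1;

        i = EX_MIN_TCPHDR;
        while (i <= tcp_hdrlen - EX_TCPOLEN_MSS) {      /* whole MSS option fits */
                if (pkt[i] == EX_TCPOPT_MSS && pkt[i + 1] == EX_TCPOLEN_MSS) {
                        *mss = (uint16_t)((pkt[i + 2] << 8) | pkt[i + 3]);
                        return 0;
                }
                step = (pkt[i] <= EX_TCPOPT_NOP) ? 1 : pkt[i + 1];
                if (step < 1)                   /* malformed option length */
                        break;
                i += step;
        }
        return -1;
}

int main(void)
{
        uint8_t syn[24] = { 0 };
        uint16_t mss = 0;

        syn[12] = 6 << 4;                       /* doff = 6 -> 24-byte header */
        syn[20] = EX_TCPOPT_MSS;
        syn[21] = EX_TCPOLEN_MSS;
        syn[22] = 0x05;
        syn[23] = 0xb4;                         /* MSS = 1460 */

        if (ex_find_mss(syn, (int)sizeof(syn), &mss) == 0)
                printf("MSS option: %u\n", mss);
        return 0;
}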
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa19..625fa1d 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@
struct tcphdr *tcph;
u_int16_t n, o;
u_int8_t *opt;
- int len;
+ int len, tcp_hdrlen;
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
@@ -52,7 +52,9 @@
return NF_DROP;
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
- if (tcph->doff * 4 > len)
+ tcp_hdrlen = tcph->doff * 4;
+
+ if (len < tcp_hdrlen)
return NF_DROP;
opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@
* Walk through all TCP options - if we find some option to remove,
* set all octets to %TCPOPT_NOP and adjust checksum.
*/
- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
optl = optlen(opt, i);
- if (i + optl > tcp_hdrlen(skb))
+ if (i + optl > tcp_hdrlen)
break;
if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f39..ab101f7 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@
{
struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+ OVS_CB(skb)->tun_key = NULL;
return do_execute_actions(dp, skb, acts->actions,
acts->actions_len, false);
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d..f2ed760 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
return 0;
- rtnl_unlock();
- return 0;
-
exit_free:
kfree_skb(reply);
exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b1..1aa84dc 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@
struct flex_array *buckets;
int i, err;
- buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c75..75c8bbf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@
if (po->tp_version == TPACKET_V3) {
lv = sizeof(struct tpacket_stats_v3);
+ st.stats3.tp_packets += st.stats3.tp_drops;
data = &st.stats3;
} else {
lv = sizeof(struct tpacket_stats);
+ st.stats1.tp_packets += st.stats1.tp_drops;
data = &st.stats1;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bd..51b968d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@
return q;
}
+/* The linklayer setting was not transferred from iproute2 in older
+ * versions, and the rate table lookup system has been dropped from
+ * the kernel. To stay backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by checking whether the rate
+ * table was modified.
+ *
+ * For linklayer ATM the rate table is aligned to 48-byte cells, so
+ * some table entries will contain the same value. The mpu (min packet
+ * unit) is also encoded into the old rate table, so starting from the
+ * mpu we find the low and high table entries that map this cell. If
+ * these entries contain the same value, the rate table has been
+ * modified for linklayer ATM.
+ *
+ * This is done by rounding the mpu up to the nearest 48-byte cell,
+ * then rounding up to the next cell, computing the table entry one
+ * below, and comparing the two.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+ int low = roundup(r->mpu, 48);
+ int high = roundup(low+1, 48);
+ int cell_low = low >> r->cell_log;
+ int cell_high = (high >> r->cell_log) - 1;
+
+ /* rtab is too inaccurate at rates > 100Mbit/s */
+ if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+ pr_debug("TC linklayer: Giving up ATM detection\n");
+ return TC_LINKLAYER_ETHERNET;
+ }
+
+ if ((cell_high > cell_low) && (cell_high < 256)
+ && (rtab[cell_low] == rtab[cell_high])) {
+ pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+ cell_low, cell_high, rtab[cell_high]);
+ return TC_LINKLAYER_ATM;
+ }
+ return TC_LINKLAYER_ETHERNET;
+}
+
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
+ if (r->linklayer == TC_LINKLAYER_UNAWARE)
+ r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
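To make the 48-byte cell heuristic above concrete, here is a minimal userspace sketch: it builds a rate table whose entries repeat per ATM cell and runs the same low/high comparison on it. The table contents, the cell_log value and the ex_* names are assumptions for illustration only, not kernel interfaces:

#include <stdint.h>
#include <stdio.h>

#define EX_CELL 48

static int ex_detect_atm(const uint32_t *rtab, unsigned int mpu,
                         unsigned int cell_log)
{
        unsigned int low  = ((mpu + EX_CELL - 1) / EX_CELL) * EX_CELL;
        unsigned int high = ((low + 1 + EX_CELL - 1) / EX_CELL) * EX_CELL;
        unsigned int cell_low  = low >> cell_log;
        unsigned int cell_high = (high >> cell_log) - 1;

        if (cell_high > cell_low && cell_high < 256 &&
            rtab[cell_low] == rtab[cell_high])
                return 1;       /* looks like an ATM-aligned table */
        return 0;               /* assume plain Ethernet */
}

int main(void)
{
        uint32_t rtab[256];

        /* Build a table where every 48-byte cell shares one cost. */
        for (unsigned int i = 0; i < 256; i++)
                rtab[i] = (uint32_t)((((i << 3) / EX_CELL) + 1) * 1000);

        printf("detected ATM: %d\n", ex_detect_atm(rtab, 0, 3));
        return 0;
}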
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef..48be3d5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -207,15 +208,19 @@
unsigned long dev_trans_start(struct net_device *dev)
{
- unsigned long val, res = dev->trans_start;
+ unsigned long val, res;
unsigned int i;
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+ res = dev->trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
dev->trans_start = res;
+
return res;
}
EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->rate_bytes_ps = conf->rate;
+ r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
* The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 45e7515..c2178b1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1329,6 +1329,7 @@
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
+ struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
@@ -1350,6 +1351,18 @@
if (!hopt->rate.rate || !hopt->ceil.rate)
goto failure;
+	/* Keep backward compatibility with rate_table-based iproute2 tc */
+ if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ }
+ if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+ if (ctab)
+ qdisc_put_rtab(ctab);
+ }
+
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79..ab67efc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -846,12 +846,12 @@
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1, see SCTP
+ * active state and set cwnd to 1 MTU, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
- transport->cwnd = 1;
+ transport->cwnd = asoc->pathmtu;
}
transport->state = SCTP_ACTIVE;
break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3f..8fdd160 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -181,12 +181,12 @@
return;
}
- call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
sctp_packet_free(&transport->packet);
if (transport->asoc)
sctp_association_put(transport->asoc);
+
+ call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}
/* Start T3_rtx timer if it is not already running and update the heartbeat
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 74f6a70..ecbc4e3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1660,6 +1660,10 @@
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
+ if (task->tk_flags & RPC_TASK_NOCONNECT) {
+ rpc_exit(task, -ENOTCONN);
+ return;
+ }
xprt_connect(task);
}
}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f..779742c 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@
struct rpc_clnt *rpcb_local_clnt4;
spinlock_t rpcb_clnt_lock;
unsigned int rpcb_users;
+ unsigned int rpcb_is_af_local : 1;
struct mutex gssp_lock;
wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764d..1891a10 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@
}
static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
- struct rpc_clnt *clnt4)
+ struct rpc_clnt *clnt4,
+ bool is_af_local)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
/* Protected by rpcb_create_local_mutex */
sn->rpcb_local_clnt = clnt;
sn->rpcb_local_clnt4 = clnt4;
+ sn->rpcb_is_af_local = is_af_local ? 1 : 0;
smp_wmb();
sn->rpcb_users = 1;
dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@
.program = &rpcb_program,
.version = RPCBVERS_2,
.authflavor = RPC_AUTH_NULL,
+ /*
+ * We turn off the idle timeout to prevent the kernel
+ * from automatically disconnecting the socket.
+ * Otherwise, we'd have to cache the mount namespace
+ * of the caller and somehow pass that to the socket
+ * reconnect code.
+ */
+ .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
};
struct rpc_clnt *clnt, *clnt4;
int result = 0;
@@ -263,7 +273,7 @@
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, true);
out:
return result;
@@ -315,7 +325,7 @@
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, false);
out:
return result;
@@ -376,13 +386,16 @@
return rpc_create(&args);
}
-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
+static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
{
- int result, error = 0;
+ int flags = RPC_TASK_NOCONNECT;
+ int error, result = 0;
+ if (is_set || !sn->rpcb_is_af_local)
+ flags = RPC_TASK_SOFTCONN;
msg->rpc_resp = &result;
- error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
+ error = rpc_call_sync(clnt, msg, flags);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -439,16 +452,19 @@
.rpc_argp = &map,
};
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ bool is_set = false;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
prog, vers, prot, port);
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
+ is_set = true;
+ }
- return rpcb_register_call(sn->rpcb_local_clnt, &msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
}
/*
@@ -461,6 +477,7 @@
const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin->sin_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -489,6 +508,7 @@
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin6->sin6_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -519,7 +541,7 @@
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
}
/**
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7..609c30c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@
{
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
+ struct tipc_link_req *temp_req;
pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
- if (b_ptr->link_req)
- tipc_disc_delete(b_ptr->link_req);
+ temp_req = b_ptr->link_req;
+ b_ptr->link_req = NULL;
spin_unlock_bh(&b_ptr->lock);
+
+ if (temp_req)
+ tipc_disc_delete(temp_req);
+
memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071d..4d93346 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -347,7 +347,7 @@
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
- connected_table);
+ connected_table)
fn(sk_vsock(vsk));
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216..a8c29fa 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -765,6 +765,7 @@
cfg80211_leave_mesh(rdev, dev);
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
cfg80211_stop_ap(rdev, dev);
break;
default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 25d217d9..5f6e982 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -441,10 +441,12 @@
goto out_unlock;
}
*rdev = wiphy_to_dev((*wdev)->wiphy);
- cb->args[0] = (*rdev)->wiphy_idx;
+ /* 0 is the first index - add 1 to parse only once */
+ cb->args[0] = (*rdev)->wiphy_idx + 1;
cb->args[1] = (*wdev)->identifier;
} else {
- struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+ /* subtract the 1 again here */
+ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
if (!wiphy) {
@@ -2620,8 +2622,8 @@
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -ENOBUFS;
cookie.msg = msg;
cookie.idx = key_idx;
@@ -6505,6 +6507,9 @@
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
+ if (!hdr)
+ break;
+
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
genlmsg_cancel(skb, hdr);
break;
@@ -6949,9 +6954,8 @@
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_REMAIN_ON_CHANNEL);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -7249,9 +7253,8 @@
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_FRAME);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
}
@@ -8130,9 +8133,8 @@
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_PROBE_CLIENT);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
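The nl80211 hunks replace IS_ERR(hdr) with a plain NULL check because the header helpers return NULL on failure rather than an ERR_PTR() value, so the old test could never trigger. A userspace model of the two error-return conventions (the ex_* names are invented for the sketch):

#include <stdio.h>

#define EX_MAX_ERRNO 4095UL

static int ex_is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-EX_MAX_ERRNO;
}

/* Returns NULL on failure -- the convention the patched checks handle. */
static void *ex_hdr_put(int fail)
{
        static char msg_space;

        return fail ? NULL : (void *)&msg_space;
}

int main(void)
{
        void *hdr = ex_hdr_put(1);      /* simulate a failed put */

        printf("IS_ERR(hdr) = %d\n", ex_is_err(hdr));   /* 0: failure missed */
        printf("hdr == NULL = %d\n", hdr == NULL);      /* 1: failure caught */
        return 0;
}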
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 81c8a10..20e86a9 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -976,21 +976,19 @@
struct net_device *dev, u16 reason, bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
+ int err = 0;
ASSERT_WDEV_LOCK(wdev);
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
- if (wdev->conn) {
+ if (wdev->conn)
err = cfg80211_sme_disconnect(wdev, reason);
- } else if (!rdev->ops->disconnect) {
+ else if (!rdev->ops->disconnect)
cfg80211_mlme_down(rdev, dev);
- err = 0;
- } else {
+ else if (wdev->current_bss)
err = rdev_disconnect(rdev, dev, reason);
- }
return err;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 3f7682a..eefbd10 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1998,12 +1998,11 @@
*
* Create or update the port list entry
*/
-static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
int act)
{
__be16 *bep;
__be32 *be32p;
- struct sockaddr_in6 *addr6;
struct smk_port_label *spp;
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
@@ -2025,10 +2024,9 @@
/*
* Get the IP address and port from the address.
*/
- addr6 = (struct sockaddr_in6 *)address;
- port = ntohs(addr6->sin6_port);
- bep = (__be16 *)(&addr6->sin6_addr);
- be32p = (__be32 *)(&addr6->sin6_addr);
+ port = ntohs(address->sin6_port);
+ bep = (__be16 *)(&address->sin6_addr);
+ be32p = (__be32 *)(&address->sin6_addr);
/*
* It's remote, so port lookup does no good.
@@ -2060,9 +2058,9 @@
ad.a.u.net->family = sk->sk_family;
ad.a.u.net->dport = port;
if (act == SMK_RECEIVING)
- ad.a.u.net->v6info.saddr = addr6->sin6_addr;
+ ad.a.u.net->v6info.saddr = address->sin6_addr;
else
- ad.a.u.net->v6info.daddr = addr6->sin6_addr;
+ ad.a.u.net->v6info.daddr = address->sin6_addr;
#endif
return smk_access(skp, object, MAY_WRITE, &ad);
}
@@ -2201,7 +2199,8 @@
case PF_INET6:
if (addrlen < sizeof(struct sockaddr_in6))
return -EINVAL;
- rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING);
+ rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+ SMK_CONNECTING);
break;
}
return rc;
@@ -3034,7 +3033,7 @@
int size)
{
struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
- struct sockaddr *sap = (struct sockaddr *) msg->msg_name;
+ struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
int rc = 0;
/*
@@ -3121,9 +3120,8 @@
return smack_net_ambient;
}
-static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap)
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
{
- struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
u8 nexthdr;
int offset;
int proto = -EINVAL;
@@ -3181,7 +3179,7 @@
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
- struct sockaddr sadd;
+ struct sockaddr_in6 sadd;
int rc = 0;
struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbb..e3c7ba8 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -522,7 +522,7 @@
}
#define nid_has_mute(codec, nid, dir) \
- check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+ check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
#define nid_has_volume(codec, nid, dir) \
check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
@@ -624,7 +624,7 @@
if (enable)
val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
}
- if (caps & AC_AMPCAP_MUTE) {
+ if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
if (!enable)
val |= HDA_AMP_MUTE;
}
@@ -648,7 +648,7 @@
{
unsigned int mask = 0xff;
- if (caps & AC_AMPCAP_MUTE) {
+ if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
mask &= ~0x80;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd2261..f303cd8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1031,6 +1031,7 @@
ALC880_FIXUP_GPIO2,
ALC880_FIXUP_MEDION_RIM,
ALC880_FIXUP_LG,
+ ALC880_FIXUP_LG_LW25,
ALC880_FIXUP_W810,
ALC880_FIXUP_EAPD_COEF,
ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@
{ }
}
},
+ [ALC880_FIXUP_LG_LW25] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x0181344f }, /* line-in */
+ { 0x1b, 0x0321403f }, /* headphone */
+ { }
+ }
+ },
[ALC880_FIXUP_W810] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+ SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
/* Below is the copied entries from alc880_quirks.c.
@@ -4329,6 +4339,7 @@
SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728..be2ba1b 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@
static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
+
static const unsigned int limiter_tlv[] = {
TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@
SOC_ENUM("Beep Pitch", beep_pitch_enum),
SOC_ENUM("Beep on Time", beep_ontime_enum),
SOC_ENUM("Beep off Time", beep_offtime_enum),
- SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+ SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
+ 0, 0x07, 0x1f, beep_tlv),
SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6c8a9e7..760e8bf 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@
static int power_vag_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@
break;
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_VAG_POWERUP, 0);
- msleep(400);
+ /*
+		 * Don't clear VAG_POWERUP while both the DAC and the ADC
+		 * are operational, to avoid inadvertently starving
+		 * whichever of the two is still in use.
+ */
+ if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+ mask) != mask) {
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, 0);
+ msleep(400);
+ }
break;
default:
break;
@@ -388,7 +398,7 @@
SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
SGTL5000_CHIP_ANA_ADC_CTRL,
- 8, 2, 0, capture_6db_attenuate),
+ 8, 1, 0, capture_6db_attenuate),
SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
SOC_DOUBLE_TLV("Headphone Playback Volume",
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index bd16010..4375c9f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -679,13 +679,14 @@
return -EINVAL;
}
- path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
- list_sink);
- if (!path) {
+ if (list_empty(&w->sources)) {
dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
return -EINVAL;
}
+ path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+ list_sink);
+
ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
if (ret < 0)
return ret;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146c..47565fd04 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@
reg = TEGRA30_I2S_CIF_RX_CTRL;
} else {
val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
- reg = TEGRA30_I2S_CIF_RX_CTRL;
+ reg = TEGRA30_I2S_CIF_TX_CTRL;
}
regmap_write(i2s->regmap, reg, val);
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 9e6e3ff..23452ee 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -110,19 +110,37 @@
static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
u8 reg, u8 value)
{
- u8 buffer[13]; /* 13: maximum length of message */
+ u8 *buffer;
+ int ret;
+
+ /* 13: maximum length of message */
+ buffer = kmalloc(13, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+ kfree(buffer);
+ return ret;
}
static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
u8 reg, u8 vl, u8 vh)
{
- u8 buffer[13]; /* 13: maximum length of message */
+ u8 *buffer;
+ int ret;
+
+ /* 13: maximum length of message */
+ buffer = kmalloc(13, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+ kfree(buffer);
+ return ret;
}
int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@
if (!rt)
return -ENOMEM;
+ rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
+ if (!rt->receiver_buffer) {
+ kfree(rt);
+ return -ENOMEM;
+ }
+
urb = &rt->receiver;
rt->serial = 1;
rt->chip = chip;
@@ -153,6 +177,7 @@
urb->interval = 1;
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret < 0) {
+ kfree(rt->receiver_buffer);
kfree(rt);
snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
return ret;
@@ -171,6 +196,9 @@
void usb6fire_comm_destroy(struct sfire_chip *chip)
{
- kfree(chip->comm);
+ struct comm_runtime *rt = chip->comm;
+
+ kfree(rt->receiver_buffer);
+ kfree(rt);
chip->comm = NULL;
}
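The 6fire changes move transfer buffers from the stack to the heap (URB transfer buffers must not live on the stack), which also means every exit path now has to free them. The shape of that pattern in a standalone sketch, with calloc()/free() standing in for kzalloc()/kfree() and an ex_send() stub in place of the USB submit:

#include <stdlib.h>

#define EX_MSG_LEN 13   /* "13: maximum length of message", as in comm.c */

static int ex_send(const unsigned char *buf, size_t len)
{
        (void)buf;
        (void)len;      /* stand-in for submitting the USB message */
        return 0;
}

static int ex_write8(unsigned char request, unsigned char reg,
                     unsigned char value)
{
        unsigned char *buffer;
        int ret;

        buffer = calloc(1, EX_MSG_LEN);         /* was: u8 buffer[13]; */
        if (!buffer)
                return -1;

        buffer[0] = request;
        buffer[1] = reg;
        buffer[2] = value;

        ret = ex_send(buffer, EX_MSG_LEN);
        free(buffer);                           /* freed on every path */
        return ret;
}

int main(void)
{
        return ex_write8(0x12, 0x34, 0x56) ? 1 : 0;
}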
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index 6a0840b..780d5ed 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -24,7 +24,7 @@
struct sfire_chip *chip;
struct urb receiver;
- u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+ u8 *receiver_buffer;
u8 serial; /* urb serial */
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 2672242..f3dd726 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
#include "chip.h"
#include "comm.h"
+enum {
+ MIDI_BUFSIZE = 64
+};
+
static void usb6fire_midi_out_handler(struct urb *urb)
{
struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@
if (!rt)
return -ENOMEM;
+ rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+ if (!rt->out_buffer) {
+ kfree(rt);
+ return -ENOMEM;
+ }
+
rt->chip = chip;
rt->in_received = usb6fire_midi_in_received;
rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@
ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
if (ret < 0) {
+ kfree(rt->out_buffer);
kfree(rt);
snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
return ret;
@@ -197,6 +208,9 @@
void usb6fire_midi_destroy(struct sfire_chip *chip)
{
- kfree(chip->midi);
+ struct midi_runtime *rt = chip->midi;
+
+ kfree(rt->out_buffer);
+ kfree(rt);
chip->midi = NULL;
}
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006..84851b9 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
#include "common.h"
-enum {
- MIDI_BUFSIZE = 64
-};
-
struct midi_runtime {
struct sfire_chip *chip;
struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@
struct snd_rawmidi_substream *out;
struct urb out_urb;
u8 out_serial; /* serial number of out packet */
- u8 out_buffer[MIDI_BUFSIZE];
+ u8 *out_buffer;
int buffer_offset;
void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 3d2551c..b5eb97f 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -582,6 +582,33 @@
urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
}
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+ int i;
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!rt->out_urbs[i].buffer)
+ return -ENOMEM;
+ rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!rt->in_urbs[i].buffer)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+ int i;
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ kfree(rt->out_urbs[i].buffer);
+ kfree(rt->in_urbs[i].buffer);
+ }
+}
+
int usb6fire_pcm_init(struct sfire_chip *chip)
{
int i;
@@ -593,6 +620,13 @@
if (!rt)
return -ENOMEM;
+ ret = usb6fire_pcm_buffers_init(rt);
+ if (ret) {
+ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ return ret;
+ }
+
rt->chip = chip;
rt->stream_state = STREAM_DISABLED;
rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@
ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
if (ret < 0) {
+ usb6fire_pcm_buffers_destroy(rt);
kfree(rt);
snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
return ret;
@@ -625,6 +660,7 @@
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
if (ret) {
+ usb6fire_pcm_buffers_destroy(rt);
kfree(rt);
snd_printk(KERN_ERR PREFIX
"error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@
void usb6fire_pcm_destroy(struct sfire_chip *chip)
{
- kfree(chip->pcm);
+ struct pcm_runtime *rt = chip->pcm;
+
+ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
chip->pcm = NULL;
}
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133..f5779d6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@
struct urb instance;
struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
/* END DO NOT SEPARATE */
- u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+ u8 *buffer;
struct pcm_urb *peer;
};
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7a444b5..659950e 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -591,17 +591,16 @@
ep->stride = frame_bits >> 3;
ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
- /* calculate max. frequency */
- if (ep->maxpacksize) {
+ /* assume max. frequency is 25% higher than nominal */
+ ep->freqmax = ep->freqn + (ep->freqn >> 2);
+ maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+ >> (16 - ep->datainterval);
+ /* but wMaxPacketSize might reduce this */
+ if (ep->maxpacksize && ep->maxpacksize < maxsize) {
/* whatever fits into a max. size packet */
maxsize = ep->maxpacksize;
ep->freqmax = (maxsize / (frame_bits >> 3))
<< (16 - ep->datainterval);
- } else {
- /* no max. packet size: just take 25% higher than nominal */
- ep->freqmax = ep->freqn + (ep->freqn >> 2);
- maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
- >> (16 - ep->datainterval);
}
if (ep->fill_max)
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d543808..95558ef 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x0991):
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e7..0df9ede 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@
if (altsd->bNumEndpoints < 1)
return -ENODEV;
epd = get_endpoint(alts, 0);
- if (!usb_endpoint_xfer_bulk(epd) ||
+ if (!usb_endpoint_xfer_bulk(epd) &&
!usb_endpoint_xfer_int(epd))
return -ENODEV;
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x0499: /* Yamaha */
err = create_yamaha_midi_quirk(chip, iface, driver, alts);
- if (err < 0 && err != -ENODEV)
+ if (err != -ENODEV)
return err;
break;
case 0x0582: /* Roland */
err = create_roland_midi_quirk(chip, iface, driver, alts);
- if (err < 0 && err != -ENODEV)
+ if (err != -ENODEV)
return err;
break;
}