Merge branch 'pm-tools'
* pm-tools:
cpupower: Remove redundant error check
cpupower: bench: parse.c: Fix several minor errors
cpupower: mperf monitor: Correct use of ! and &
cpupower: Adjust MAINTAINERS file
PM / tools: cpupower: drop negativity check on unsigned value
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index fd786ea..e182be5 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -60,12 +60,6 @@
configuring GPIOs it can get its ACPI handle and extract this information
from ACPI tables.
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
DMA support
~~~~~~~~~~~
DMA controllers enumerated via ACPI should be registered in the system to
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index f055515..366690c 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -8,10 +8,12 @@
under node /cpus/cpu@0.
Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
- for details
+- None
Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+ details. OPPs *must* be supplied either via DT, i.e. this property, or
+ populated at runtime.
- clock-latency: Specify the possible maximum transition latency for clock,
in unit of nanoseconds.
- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c6..c587a96 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@
If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
device.
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.
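[Not part of the patch: a minimal userspace sketch of how a client could detect the new property through the evdev EVIOCGPROP ioctl, which is what the "userspace should emulate buttons accordingly" paragraph above implies. The device node path is made up, and the fallback define covers headers that predate this change.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#ifndef INPUT_PROP_TOPBUTTONPAD
#define INPUT_PROP_TOPBUTTONPAD 0x04	/* value added by this series */
#endif

int main(void)
{
	unsigned char props[INPUT_PROP_MAX / 8 + 1];
	int fd = open("/dev/input/event5", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	memset(props, 0, sizeof(props));
	if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
		return 1;
	if (props[INPUT_PROP_TOPBUTTONPAD / 8] &
	    (1 << (INPUT_PROP_TOPBUTTONPAD % 8)))
		printf("top button area present: emulate buttons in userspace\n");
	return 0;
}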
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1b9aa8..b7fa2f5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2790,6 +2790,12 @@
leaf rcu_node structure. Useful for very large
systems.
+ rcutree.jiffies_till_sched_qs= [KNL]
+ Set required age in jiffies for a
+ given grace period before RCU starts
+ soliciting quiescent-state help from
+ rcu_note_context_switch().
+
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
@@ -3526,7 +3532,7 @@
the allocated input device; If set to 0, video driver
will only send out the event without touching backlight
brightness level.
- default: 0
+ default: 1
virtio_mmio.device=
[VMMIO] Memory mapped virtio (platform) device.
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index a9adad8..c6279c2 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -51,9 +51,6 @@
SoC framework -> modifies on required cases certain OPPs -> OPP layer
-> queries to search/retrieve information ->
-Architectures that provide a SoC framework for OPP should select ARCH_HAS_OPP
-to make the OPP layer available.
-
OPP layer expects each domain to be represented by a unique device pointer. SoC
framework registers a set of initial OPPs per device with the OPP layer. This
list is expected to be an optimally small number typically around 5 per device.
diff --git a/MAINTAINERS b/MAINTAINERS
index 76a1b0f..a221b42 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -156,7 +156,6 @@
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
-M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@@ -4511,8 +4510,7 @@
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
-M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
-M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+M: Alexander Aring <alex.aring@gmail.com>
L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -6958,6 +6956,12 @@
S: Maintained
F: drivers/pinctrl/pinctrl-at91.c
+PIN CONTROLLER - RENESAS
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-sh@vger.kernel.org
+S: Maintained
+F: drivers/pinctrl/sh-pfc/
+
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <t.figa@samsung.com>
M: Thomas Abraham <thomas.abraham@linaro.org>
@@ -7028,8 +7032,10 @@
F: kernel/*timer*
POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
+M: Sebastian Reichel <sre@kernel.org>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: David Woodhouse <dwmw2@infradead.org>
+L: linux-pm@vger.kernel.org
T: git git://git.infradead.org/battery-2.6.git
S: Maintained
F: include/linux/power_supply.h
@@ -8021,6 +8027,16 @@
F: include/linux/ata.h
F: include/linux/libata.h
+SERIAL ATA AHCI PLATFORM devices support
+M: Hans de Goede <hdegoede@redhat.com>
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Supported
+F: drivers/ata/ahci_platform.c
+F: drivers/ata/libahci_platform.c
+F: include/linux/ahci_platform.h
+
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
L: linux-scsi@vger.kernel.org
diff --git a/Makefile b/Makefile
index f3c543d..f6a7794 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@@ -688,6 +688,8 @@
endif
endif
+KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
+
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -Wa,-gdwarf-2
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 245058b..88acf8b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,6 +6,7 @@
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 2877959..b84bac5 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -925,7 +925,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x00100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+ clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
<&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 2ebc421..2c0d6ea 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1124,6 +1124,7 @@
compatible = "atmel,at91sam9rl-pwm";
reg = <0xf8034000 0x300>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pwm_clk>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -1155,8 +1156,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
- <&uhpck>;
+ clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
};
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 9d85318..e35d880 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -275,7 +275,7 @@
cpu_topology[cpuid].socket_id, mpidr);
}
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
{
return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 8f9b66c..f7889f6 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -100,7 +100,6 @@
default y
depends on ARCH_EXYNOS5
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
- select ARCH_HAS_OPP
select HAVE_ARM_ARCH_TIMER
select AUTO_ZRELADDR
select MIGHT_HAVE_PCI
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 8a134d0..920a4ba 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -40,15 +40,17 @@
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
for (;;) {
- /* make cpu1 to be turned off at next WFI command */
- if (cpu == 1)
- exynos_cpu_power_down(cpu);
+ /* Turn the CPU off on next WFI instruction. */
+ exynos_cpu_power_down(core_id);
wfi();
- if (pen_release == cpu_logical_map(cpu)) {
+ if (pen_release == core_id) {
/*
* OK, proper wakeup, we're done
*/
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e..50b9aad 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -90,7 +90,8 @@
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
- unsigned long phys_cpu = cpu_logical_map(cpu);
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
int ret = -ENOSYS;
/*
@@ -104,17 +105,18 @@
* the holding pen - release it, then wait for it to flag
* that it has been released by resetting pen_release.
*
- * Note that "pen_release" is the hardware CPU ID, whereas
+ * Note that "pen_release" is the hardware CPU core ID, whereas
* "cpu" is Linux's internal ID.
*/
- write_pen_release(phys_cpu);
+ write_pen_release(core_id);
- if (!exynos_cpu_power_state(cpu)) {
- exynos_cpu_power_up(cpu);
+ if (!exynos_cpu_power_state(core_id)) {
+ exynos_cpu_power_up(core_id);
timeout = 10;
/* wait max 10 ms until cpu1 is on */
- while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+ while (exynos_cpu_power_state(core_id)
+ != S5P_CORE_LOCAL_PWR_EN) {
if (timeout-- == 0)
break;
@@ -145,20 +147,20 @@
* Try to set boot address using firmware first
* and fall back to boot register if it fails.
*/
- ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+ ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
if (ret && ret != -ENOSYS)
goto fail;
if (ret == -ENOSYS) {
- void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+ void __iomem *boot_reg = cpu_boot_reg(core_id);
if (IS_ERR(boot_reg)) {
ret = PTR_ERR(boot_reg);
goto fail;
}
- __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+ __raw_writel(boot_addr, cpu_boot_reg(core_id));
}
- call_firmware_op(cpu_boot, phys_cpu);
+ call_firmware_op(cpu_boot, core_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -227,22 +229,24 @@
* boot register if it fails.
*/
for (i = 1; i < max_cpus; ++i) {
- unsigned long phys_cpu;
unsigned long boot_addr;
+ u32 mpidr;
+ u32 core_id;
int ret;
- phys_cpu = cpu_logical_map(i);
+ mpidr = cpu_logical_map(i);
+ core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
boot_addr = virt_to_phys(exynos4_secondary_startup);
- ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+ ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
if (ret && ret != -ENOSYS)
break;
if (ret == -ENOSYS) {
- void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+ void __iomem *boot_reg = cpu_boot_reg(core_id);
if (IS_ERR(boot_reg))
break;
- __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+ __raw_writel(boot_addr, cpu_boot_reg(core_id));
}
}
}
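[Not part of the patch: a standalone sketch of what switching from the raw cpu_logical_map() value to MPIDR affinity level 0 buys. The macro bodies mirror the ARM cputype.h definitions for the demo, and the MPIDR value is made up.]

#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned int mpidr = 0x0101;	/* hypothetical: cluster 1, core 1 */

	/* The firmware and boot-register interfaces expect the core id (1),
	 * not the full MPIDR (0x101) that cpu_logical_map() returns. */
	printf("core_id = %u\n", MPIDR_AFFINITY_LEVEL(mpidr, 0));
	return 0;
}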
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index a5960e2..31aa866 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -2,7 +2,6 @@
bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
select ARCH_HAS_HOLES_MEMORYMODEL
- select ARCH_HAS_OPP
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_ERRATA_764369 if SMP
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 4b51857..ab6bcfd 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,6 +1,5 @@
menuconfig ARCH_MXC
bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
- select ARCH_HAS_OPP
select ARCH_REQUIRE_GPIOLIB
select ARM_CPU_SUSPEND if PM
select CLKSRC_MMIO
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 8e795de..8556c78 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -70,7 +70,7 @@
static const char *lvds_sels[] = {
"dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
"pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
- "pcie_ref", "sata_ref",
+ "pcie_ref_125m", "sata_ref_100m",
};
enum mx6q_clks {
@@ -491,7 +491,7 @@
/* All existing boards with PCIe use LVDS1 */
if (IS_ENABLED(CONFIG_PCI_IMX6))
- clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+ clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 477202f..2bdc323 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -292,6 +292,10 @@
.notifier_call = mvebu_hwcc_notifier,
};
+static struct notifier_block mvebu_hwcc_pci_nb = {
+ .notifier_call = mvebu_hwcc_notifier,
+};
+
static void __init armada_370_coherency_init(struct device_node *np)
{
struct resource res;
@@ -427,7 +431,7 @@
{
if (coherency_available())
bus_register_notifier(&pci_bus_type,
- &mvebu_hwcc_nb);
+ &mvebu_hwcc_pci_nb);
return 0;
}
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366..da5bb29 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -15,6 +15,8 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
+
__CPUINIT
#define CPU_RESUME_ADDR_REG 0xf10182d4
@@ -22,13 +24,18 @@
.global armada_375_smp_cpu1_enable_code_end
armada_375_smp_cpu1_enable_code_start:
- ldr r0, [pc, #4]
+ARM_BE8(setend be)
+ adr r0, 1f
+ ldr r0, [r0]
ldr r1, [r0]
+ARM_BE8(rev r1, r1)
mov pc, r1
+1:
.word CPU_RESUME_ADDR_REG
armada_375_smp_cpu1_enable_code_end:
ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend be)
bl v7_invalidate_l1
b secondary_startup
ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index a1d407c..25aa823 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -201,12 +201,12 @@
/* Test the CR_C bit and set it if it was cleared */
asm volatile(
- "mrc p15, 0, %0, c1, c0, 0 \n\t"
- "tst %0, #(1 << 2) \n\t"
- "orreq %0, %0, #(1 << 2) \n\t"
- "mcreq p15, 0, %0, c1, c0, 0 \n\t"
+ "mrc p15, 0, r0, c1, c0, 0 \n\t"
+ "tst r0, #(1 << 2) \n\t"
+ "orreq r0, r0, #(1 << 2) \n\t"
+ "mcreq p15, 0, r0, c1, c0, 0 \n\t"
"isb "
- : : "r" (0));
+ : : : "r0");
pr_warn("Failed to suspend the system\n");
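[Not part of the patch: the hunk above fixes the snippet by pinning r0 and listing it as a clobber instead of writing through a fake input operand. Purely for illustration, an alternative pattern (a sketch, not what this file does) lets the compiler choose the scratch register via an earlyclobber output; ARM-only, hypothetical helper name.]

static inline void enable_cr_c_bit(void)
{
	unsigned long tmp;

	/* Read-modify-write of SCTLR with a compiler-chosen scratch register. */
	asm volatile(
		"mrc	p15, 0, %0, c1, c0, 0\n\t"
		"tst	%0, #(1 << 2)\n\t"
		"orreq	%0, %0, #(1 << 2)\n\t"
		"mcreq	p15, 0, %0, c1, c0, 0\n\t"
		"isb"
		: "=&r" (tmp) : : "cc");
}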
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 1c1ed73..e7189dc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -15,7 +15,6 @@
bool "TI OMAP3"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select OMAP_INTERCONNECT
select PM_OPP if PM
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 7980730..3a6e3c2 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -85,7 +85,6 @@
select CPU_V7
select SH_CLK_CPG
select RENESAS_IRQC
- select ARCH_HAS_OPP
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
@@ -263,7 +262,6 @@
config MACH_KZM9G
bool "KZM-A9-GT board"
depends on ARCH_SH73A0
- select ARCH_HAS_OPP
select ARCH_REQUIRE_GPIOLIB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
select SND_SOC_AK4642 if SND_SIMPLE_CARD
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index d8b9330..1af7032 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -64,7 +64,6 @@
config ARCH_VEXPRESS_SPC
bool "Versatile Express Serial Power Controller (SPC)"
- select ARCH_HAS_OPP
select PM_OPP
help
The TC2 (A15x2 A7x3) versatile express core tile integrates a logic
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 0c164f8..aaa5162 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -1,6 +1,5 @@
config ARCH_ZYNQ
bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
- select ARCH_HAS_OPP
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de34..839f48c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@
select ARCH_HAS_OPP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 60e98a63..e786e6c 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -12,8 +12,6 @@
#include <linux/efi.h>
#include <linux/libfdt.h>
#include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
/*
* AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a..e90c542 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@
early_param("initrd", early_initrd);
#endif
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+ phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+ return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
@@ -70,9 +81,7 @@
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
- unsigned long max_dma_phys =
- (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
- max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+ max_dma = PFN_DOWN(max_zone_dma_phys());
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
- dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+ dma_phys_limit = max_zone_dma_phys();
dma_contiguous_reserve(dma_phys_limit);
memblock_allow_resize();
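[Not part of the patch: a quick standalone check of what max_zone_dma_phys() returns for a made-up memory map, putting the "32-bit devices use a DMA offset" assumption into numbers. GENMASK_ULL is re-expanded here only for the demo.]

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Hypothetical map: DRAM spans [0x8000000000, 0x8800000000). */
	uint64_t dram_start = 0x8000000000ULL;
	uint64_t dram_end   = 0x8800000000ULL;
	uint64_t offset = dram_start & GENMASK_ULL(63, 32);
	uint64_t limit  = offset + (1ULL << 32);

	if (limit > dram_end)
		limit = dram_end;
	/* Prints 0x8100000000: the first 4 GiB of DRAM becomes ZONE_DMA. */
	printf("ZONE_DMA limit = %#llx\n", (unsigned long long)limit);
	return 0;
}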
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd..fcec5ce 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864..c9eec84 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
- .init.data : AT(__data_lma + __data_len)
+ .init.data : AT(__data_lma + __data_len + 32)
{
__sinitdata = .;
INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4f..0ccf0cf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6db..1e7290e 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454..c7495dc 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a021122..6b988ad 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6..1fe7ff2 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@
PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+ PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+ PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
};
static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d..6ab9515 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-pca-platform.h>
#include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df..e862f78 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43..2de71e8 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600..e2c0b02 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@
{
#define CONFIG_SMC_GCTL_VAL 0x00000010
- if (!devm_pinctrl_get_select_default(&pdev->dev))
- return -EBUSY;
bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
bfin_write32(SMC_B0CTL, 0x01002011);
bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@
void bf609_nor_flash_exit(struct platform_device *pdev)
{
- devm_pinctrl_put(pdev->dev.pins->p);
bfin_write32(SMC_GCTL, 0);
}
@@ -2058,15 +2055,14 @@
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+ PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+ PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+ PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb9..a1efd93 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
#define __MACH_BF609_PM_H__
#include <linux/suspend.h>
+#include <linux/platform_device.h>
extern int bfin609_pm_enter(suspend_state_t state);
extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@
void bfin_sec_raise_irq(unsigned int sid);
void coreb_enable(void);
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
#endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd695..b1bfcf4 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static int smc_pm_syscore_suspend(void)
{
- bf609_nor_flash_exit();
+ bf609_nor_flash_exit(NULL);
return 0;
}
static void smc_pm_syscore_resume(void)
{
- bf609_nor_flash_init();
+ bf609_nor_flash_init(NULL);
}
static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7ce..1f94784 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
- bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
/* Enable interrupts IVG7-15 */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2f3abcf..44a6915 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -10,6 +10,7 @@
select ARCH_MIGHT_HAVE_PC_SERIO
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select PM if (!IA64_HP_SIM)
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
diff --git a/arch/ia64/include/asm/acenv.h b/arch/ia64/include/asm/acenv.h
index 3f9eaee..35ff13a 100644
--- a/arch/ia64/include/asm/acenv.h
+++ b/arch/ia64/include/asm/acenv.h
@@ -19,8 +19,6 @@
/* Asm macros */
-#ifdef CONFIG_ACPI
-
static inline int
ia64_acpi_acquire_global_lock(unsigned int *lock)
{
@@ -51,6 +49,4 @@
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
-#endif
-
#endif /* _ASM_IA64_ACENV_H */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 75dc59a..a1d91ab 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -40,6 +40,11 @@
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return !!acpi_lapic;
+}
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa2971..f5645d6 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad..0bef864 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fefe7c8..80b94b0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc23477..0fdd7ee 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b..d645428 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@
return rb;
}
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+ bool is_base_size)
{
+
int size, a_psize;
/* Look at the 8 bit LP value */
unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@
continue;
a_psize = __hpte_actual_psize(lp, size);
- if (a_psize != -1)
+ if (a_psize != -1) {
+ if (is_base_size)
+ return 1ul << mmu_psize_defs[size].shift;
return 1ul << mmu_psize_defs[a_psize].shift;
+ }
}
}
return 0;
}
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 1);
+}
+
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014d..c2b4dcf 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
*/
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
+#include <asm/processor.h>
/*
* Segment table
@@ -496,7 +497,7 @@
*/
struct subpage_prot_table {
unsigned long maxaddr; /* only addresses < this are protected */
- unsigned int **protptrs[2];
+ unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
unsigned int *low_prot[4];
};
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9ea266e..7e46125 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@
.globl n; \
n:
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
#define _KPROBE(n) \
.section ".kprobes.text","a"; \
.globl n; \
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b..0c15764 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* Power8 DD1: Does not support doorbell IPIs */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x004d0100,
+ .cpu_name = "POWER8 (raw)",
+ .cpu_features = CPU_FTRS_POWER8_DD1,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 51a3ff7..1007fb8 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -747,7 +747,7 @@
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymetric SMT dependancy */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
{
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8056107..68468d6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@
goto out;
}
if (!rma_setup && is_vrma_hpte(v)) {
- unsigned long psize = hpte_page_size(v, r);
+ unsigned long psize = hpte_base_page_size(v, r);
unsigned long senc = slb_pgsize_encoding(psize);
unsigned long lpcr;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e62243..5a24d3c 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@
r = hpte[i+1];
/*
- * Check the HPTE again, including large page size
- * Since we don't currently allow any MPSS (mixed
- * page-size segment) page sizes, it is sufficient
- * to check against the actual page size.
+ * Check the HPTE again, including base page size
*/
if ((v & valid) && (v & mask) == val &&
- hpte_page_size(v, r) == (1ul << pshift))
+ hpte_base_page_size(v, r) == (1ul << pshift))
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347e..558a67d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
*
* LR = return address to continue at after eventually re-enabling MMU
*/
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e3..d044b8b 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
#define FUNC(name) GLUE(.,name)
+#endif
#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675..16c4d88 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
#if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
#define FUNC(name) GLUE(.,name)
+#endif
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -146,7 +150,7 @@
* On entry, r4 contains the guest shadow MSR
* MSR.EE has to be 0 when calling this function
*/
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
mfmsr r5
LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba..ef27fbd 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@
u32 irq, server, priority;
int rc;
- if (args->nargs != 3 || args->nret != 1) {
+ if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
- irq = args->args[0];
- server = args->args[1];
- priority = args->args[2];
+ irq = be32_to_cpu(args->args[0]);
+ server = be32_to_cpu(args->args[1]);
+ priority = be32_to_cpu(args->args[2]);
rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@
u32 irq, server, priority;
int rc;
- if (args->nargs != 1 || args->nret != 3) {
+ if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
rc = -3;
goto out;
}
- irq = args->args[0];
+ irq = be32_to_cpu(args->args[0]);
server = priority = 0;
rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@
goto out;
}
- args->rets[1] = server;
- args->rets[2] = priority;
+ args->rets[1] = cpu_to_be32(server);
+ args->rets[2] = cpu_to_be32(priority);
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@
u32 irq;
int rc;
- if (args->nargs != 1 || args->nret != 1) {
+ if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
- irq = args->args[0];
+ irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@
u32 irq;
int rc;
- if (args->nargs != 1 || args->nret != 1) {
+ if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
- irq = args->args[0];
+ irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
#endif /* CONFIG_KVM_XICS */
@@ -205,32 +205,6 @@
return rc;
}
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
- int i;
-
- args->token = be32_to_cpu(args->token);
- args->nargs = be32_to_cpu(args->nargs);
- args->nret = be32_to_cpu(args->nret);
- for (i = 0; i < args->nargs; i++)
- args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
- int i;
-
- for (i = 0; i < args->nret; i++)
- args->args[i] = cpu_to_be32(args->args[i]);
- args->token = cpu_to_be32(args->token);
- args->nargs = cpu_to_be32(args->nargs);
- args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
struct rtas_token_definition *d;
@@ -249,8 +223,6 @@
if (rc)
goto fail;
- kvmppc_rtas_swap_endian_in(&args);
-
/*
* args->rets is a pointer into args->args. Now that we've
* copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
- args.rets = &args.args[args.nargs];
+ args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
- if (d->token == args.token) {
+ if (d->token == be32_to_cpu(args.token)) {
d->handler->handler(vcpu, &args);
rc = 0;
break;
@@ -275,7 +247,6 @@
if (rc == 0) {
args.rets = orig_rets;
- kvmppc_rtas_swap_endian_out(&args);
rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
if (rc)
goto fail;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03..86903d3 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@
if (printk_ratelimit())
pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
__func__, (long)gfn, pfn);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96..43435c6 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@
stb r4,0(r6)
blr
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46..5c09f36 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@
sh = regs->gpr[rb] & 0x3f;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
- if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+ if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@
sh = rb;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
- if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+ if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@
#ifdef __powerpc64__
case 27: /* sld */
- sh = regs->gpr[rd] & 0x7f;
+ sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
regs->gpr[ra] = regs->gpr[rd] << sh;
else
@@ -1235,7 +1235,7 @@
sh = regs->gpr[rb] & 0x7f;
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
- if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+ if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@
sh = rb | ((instr & 2) << 4);
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
- if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+ if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
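[Not part of the patch: a small LP64 demo of why the carry mask must be built from 1ul. With a plain int constant the shift is undefined once sh reaches 32, so the sra/srad emulation above computed a bogus mask for large shift counts. The shift count is made up.]

#include <stdio.h>

int main(void)
{
	unsigned int sh = 40;	/* hypothetical shift count from the rb field */
	unsigned long mask = (1ul << sh) - 1;	/* well-defined on LP64 */
	/* (1 << sh) - 1 here would be undefined: int is only 32 bits wide. */

	printf("carry mask for sh=%u: %#lx\n", sh, mask);
	return 0;
}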
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 6dcdade..82e82ca 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -390,12 +390,16 @@
case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
- PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
- else
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+ PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
+ } else {
PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+ PPC_SRWI(r_A, r_A, 12);
+ }
break;
case BPF_ANC | SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
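[Not part of the patch: a C restatement of what the corrected JIT emits. Because the BUILD_BUG_ON pins VLAN_TAG_PRESENT to 0x1000, the "present" bit can be turned into 0/1 with a shift by 12, while SKF_AD_VLAN_TAG now masks out only that bit. The tci value is made up.]

#include <stdio.h>

#define VLAN_TAG_PRESENT	0x1000

int main(void)
{
	unsigned short vlan_tci = 0x3064;	/* hypothetical sk_buff->vlan_tci */
	unsigned int tag = vlan_tci & ~VLAN_TAG_PRESENT;	/* SKF_AD_VLAN_TAG */
	unsigned int present = (vlan_tci & VLAN_TAG_PRESENT) >> 12;	/* 0 or 1 */

	printf("tag=%#x present=%u\n", tag, present);
	return 0;
}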
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6..2d0b4d6 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@
}
of_node_set_flag(dn, OF_DYNAMIC);
+ of_node_init(dn);
return dn;
}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb6..1c0a60d 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
+ of_node_init(np);
np->parent = derive_parent(path);
if (IS_ERR(np->parent)) {
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70..18ea9e3 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@
return 0;
asm volatile(
- "0: lfpc %1\n"
- " la %0,0\n"
+ " lfpc %1\n"
+ "0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d67..e88d35d 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
- .long 3, 0xc100efea, 0xf46ce800, 0x00400000
+ .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
- .long 2, 0xc100efea, 0xf46c0000
+ .long 2, 0xc100eff2, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
- .long 2, 0xc100efea, 0xf0680000
+ .long 2, 0xc100eff2, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
.long 1, 0xc100efc2
#elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734..5dc7ad9 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
- if ((data & ~mask) != PSW_USER_BITS)
+ if ((data ^ PSW_USER_BITS) & ~mask)
+ /* Invalid psw mask. */
+ return -EINVAL;
+ if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+ /* Invalid address-space-control bits */
return -EINVAL;
if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+ /* Invalid addressing mode bits */
return -EINVAL;
}
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~mask) != PSW32_USER_BITS)
+ if ((tmp ^ PSW32_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
+ if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+ /* Invalid address-space-control bits */
+ return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51e..30de427 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
static struct irq_chip zpci_irq_chip = {
.name = "zPCI",
- .irq_unmask = zpci_enable_irq,
- .irq_mask = zpci_disable_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_mask = mask_msi_irq,
};
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@
return rc;
}
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
- int offset, pos;
- u32 mask_bits;
-
- if (msi->msi_attrib.is_msix) {
- offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
- msi->masked = readl(msi->mask_base + offset);
- writel(flag, msi->mask_base + offset);
- } else if (msi->msi_attrib.maskbit) {
- pos = (long) msi->mask_base;
- pci_read_config_dword(msi->dev, pos, &mask_bits);
- mask_bits &= ~(mask);
- mask_bits |= flag & mask;
- pci_write_config_dword(msi->dev, pos, mask_bits);
- } else
- return 0;
-
- msi->msi_attrib.maskbit = !!flag;
- return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
- struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
- zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
- struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
- zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
@@ -487,7 +447,10 @@
/* Release MSI interrupts */
list_for_each_entry(msi, &pdev->msi_list, list) {
- zpci_msi_set_mask_bits(msi, 1, 1);
+ if (msi->msi_attrib.is_msix)
+ default_msix_mask_irq(msi, 1);
+ else
+ default_msi_mask_irq(msi, 1, 1);
irq_set_msi_desc(msi->irq, NULL);
irq_free_desc(msi->irq);
msi->msg.address_lo = 0;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4..bf5b3f5 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
- $(call cc-option,-m2a-nofpu,)
+ $(call cc-option,-m2a-nofpu,) \
+ $(call cc-option,-m4-nofpu,)
cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
$(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 29f2e98..407c87d 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,6 +78,7 @@
select HAVE_C_RECORDMCOUNT
select NO_BOOTMEM
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274f..42f2bca 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
#define __NR_finit_module 342
#define __NR_sched_setattr 343
#define __NR_sched_getattr 344
+#define __NR_renameat2 345
-#define NR_syscalls 345
+#define NR_syscalls 346
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb1..f834224 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
.globl sys32_mmap2
sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8..85fe9b1 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/ .long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2b..33ecba2 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@
/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+ .word sys32_renameat2
#endif /* CONFIG_COMPAT */
@@ -165,3 +166,4 @@
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+ .word sys_renameat2
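[Not part of the patch: a userspace sketch of exercising the newly wired syscall through syscall(2) on sparc. The file names are made up, and RENAME_NOREPLACE (1) is spelled as a literal since older headers may not define it.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_renameat2
#define __NR_renameat2 345	/* value wired up above for sparc */
#endif

int main(void)
{
	/* Flag 1 == RENAME_NOREPLACE: fail instead of clobbering the target. */
	long ret = syscall(__NR_renameat2, AT_FDCWD, "old.txt",
			   AT_FDCWD, "new.txt", 1);

	if (ret < 0)
		perror("renameat2");
	return 0;
}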
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9472079..f1b3eb1 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,6 +12,7 @@
#include <mem_user.h>
#include <os.h>
#include <skas.h>
+#include <kern_util.h>
struct host_vm_change {
struct host_vm_op {
@@ -124,6 +125,9 @@
struct host_vm_op *last;
int ret = 0;
+ if ((addr >= STUB_START) && (addr < STUB_END))
+ return -EINVAL;
+
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@
/* This is not an else because ret is modified above */
if (ret) {
printk(KERN_ERR "fix_range_common: failed, killing current "
- "process\n");
+ "process: %d\n", task_tgid_vnr(current));
+ /* We are under mmap_sem, release it such that current can terminate */
+ up_write(&current->mm->mmap_sem);
force_sig(SIGKILL, current);
+ do_signal();
}
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 974b874..5678c35 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,7 +206,7 @@
int is_write = FAULT_WRITE(fi);
unsigned long address = FAULT_ADDRESS(fi);
- if (regs)
+ if (!is_user && regs)
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
if (!is_user && (address >= start_vm) && (address < end_vm)) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d531879..908579f 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@
void wait_stub_done(int pid)
{
- int n, status, err, bad_stop = 0;
+ int n, status, err;
while (1) {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@
if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
return;
- else
- bad_stop = 1;
bad_wait:
err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@
printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
"pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
status);
- if (bad_stop)
- kill(pid, SIGKILL);
- else
- fatal_sigsegv();
+ fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749e..70c43b5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,6 +21,7 @@
### Arch settings
config X86
def_bool y
+ select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
@@ -131,6 +132,8 @@
select HAVE_CC_STACKPROTECTOR
select GENERIC_CPU_AUTOPROBE
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ACPI_LEGACY_TABLES_LOOKUP if ACPI
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 84c2234..7a6d43a 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@
.section ".bsdata", "a"
bugger_off_msg:
- .ascii "Direct floppy boot is not supported. "
- .ascii "Use a boot loader program instead.\r\n"
+ .ascii "Use a boot loader.\r\n"
.ascii "\n"
- .ascii "Remove disk and press any key to reboot ...\r\n"
+ .ascii "Remove disk and press any key to reboot...\r\n"
.byte 0
#ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@
#else
.word 0x8664 # x86-64
#endif
- .word 3 # nr_sections
+ .word 4 # nr_sections
.long 0 # TimeDateStamp
.long 0 # PointerToSymbolTable
.long 1 # NumberOfSymbols
@@ -250,6 +249,25 @@
.word 0 # NumberOfLineNumbers
.long 0x60500020 # Characteristics (section flags)
+ #
+ # The offset & size fields are filled in by build.c.
+ #
+ .ascii ".bss"
+ .byte 0
+ .byte 0
+ .byte 0
+ .byte 0
+ .long 0
+ .long 0x0
+ .long 0 # Size of initialized data
+ # on disk
+ .long 0x0
+ .long 0 # PointerToRelocations
+ .long 0 # PointerToLineNumbers
+ .word 0 # NumberOfRelocations
+ .word 0 # NumberOfLineNumbers
+ .long 0xc8000080 # Characteristics (section flags)
+
#endif /* CONFIG_EFI_STUB */
# Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 1a2f212..a7661c4 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -143,7 +143,7 @@
#ifdef CONFIG_EFI_STUB
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
{
unsigned int pe_header;
unsigned short num_sections;
@@ -164,10 +164,10 @@
put_unaligned_le32(size, section + 0x8);
/* section header vma field */
- put_unaligned_le32(offset, section + 0xc);
+ put_unaligned_le32(vma, section + 0xc);
/* section header 'size of initialised data' field */
- put_unaligned_le32(size, section + 0x10);
+ put_unaligned_le32(datasz, section + 0x10);
/* section header 'file offset' field */
put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@
}
}
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+ update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
static void update_pecoff_setup_and_reloc(unsigned int size)
{
u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@
pe_header = get_unaligned_le32(&buf[0x3c]);
- /* Size of image */
- put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
/*
* Size of code: Subtract the size of the first sector (512 bytes)
* which includes the header.
@@ -220,6 +222,22 @@
update_pecoff_section_header(".text", text_start, text_sz);
}
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+ unsigned int pe_header;
+ unsigned int bss_sz = init_sz - file_sz;
+
+ pe_header = get_unaligned_le32(&buf[0x3c]);
+
+ /* Size of uninitialized data */
+ put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+ /* Size of image */
+ put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+ update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
static int reserve_pecoff_reloc_section(int c)
{
/* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@
static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
static inline void update_pecoff_text(unsigned int text_start,
unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+ unsigned int init_sz) {}
static inline void efi_stub_defaults(void) {}
static inline void efi_stub_entry_update(void) {}
@@ -310,7 +330,7 @@
int main(int argc, char ** argv)
{
- unsigned int i, sz, setup_sectors;
+ unsigned int i, sz, setup_sectors, init_sz;
int c;
u32 sys_size;
struct stat sb;
@@ -376,7 +396,9 @@
buf[0x1f1] = setup_sectors-1;
put_unaligned_le32(sys_size, &buf[0x1f4]);
- update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+ update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+ init_sz = get_unaligned_le32(&buf[0x260]);
+ update_pecoff_bss(i + (sys_size * 16), init_sz);
efi_stub_entry_update();
diff --git a/arch/x86/include/asm/acenv.h b/arch/x86/include/asm/acenv.h
index 6687329..1b010a8 100644
--- a/arch/x86/include/asm/acenv.h
+++ b/arch/x86/include/asm/acenv.h
@@ -18,8 +18,6 @@
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
-#ifdef CONFIG_ACPI
-
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);
@@ -44,6 +42,4 @@
: "=r"(n_hi), "=r"(n_lo) \
: "0"(n_hi), "1"(n_lo))
-#endif
-
#endif /* _ASM_X86_ACENV_H */
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index e06225e..0ab4f9f 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -121,6 +121,11 @@
buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
}
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return !!acpi_lapic;
+}
+
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04..5848744 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@
u32 eax;
u8 ret = 0;
int idled = 0;
- int polling;
int err = 0;
if (!need_resched()) {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a800290..f9e4fdd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@
*/
detect_extended_topology(c);
+ if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+ /*
+ * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+ * detection.
+ */
+ c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+ detect_ht(c);
+#endif
+ }
+
l2 = init_intel_cacheinfo(c);
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@
set_cpu_cap(c, X86_FEATURE_P3);
#endif
- if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
- /*
- * let's use the legacy cpuid vector 0x1 and 0x4 for topology
- * detection.
- */
- c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
- detect_ht(c);
-#endif
- }
-
/* Work around errata */
srat_detect_node(c);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c..9c8f739 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@
#endif
}
+#ifdef CONFIG_X86_HT
+ /*
+ * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+ * turn means that the only possibility is SMT (as indicated in
+ * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+ * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+ * c->phys_proc_id.
+ */
+ if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38..9a79c8d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@
for_each_online_cpu(i) {
err = mce_device_create(i);
if (err) {
+ /*
+ * Register notifier anyway (and do not unreg it) so
+ * that we don't leave undeleted timers, see notifier
+ * callback above.
+ */
+ __register_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
goto err_device_create;
}
@@ -2471,10 +2477,6 @@
err_register:
unregister_syscore_ops(&mce_syscore_ops);
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
-
err_device_create:
/*
* We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff..2879ecd 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
+ /* Check if the extra MSRs can be safely accessed */
+ if (!er->extra_msr_access)
+ return -ENXIO;
reg->idx = er->idx;
reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bd..8ade931 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
+ bool extra_msr_access;
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
- .event = (e), \
- .msr = (ms), \
- .config_mask = (m), \
- .valid_mask = (vm), \
- .idx = EXTRA_REG_##i, \
+ .event = (e), \
+ .msr = (ms), \
+ .config_mask = (m), \
+ .valid_mask = (vm), \
+ .idx = EXTRA_REG_##i, \
+ .extra_msr_access = true, \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa..2502d0d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@
intel_pmu_lbr_read();
/*
+ * CondChgd bit 63 doesn't indicate any overflow status. Ignore
+ * and clear the bit.
+ */
+ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+ if (!status)
+ goto done;
+ }
+
+ /*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
@@ -2173,6 +2182,41 @@
}
}
+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP.
+ * The function tests if the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+ u64 val_old, val_new, val_tmp;
+
+ /*
+ * Read the current value, change it and read it back to see if it
+ * matches, this is needed to detect certain hardware emulators
+ * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+ */
+ if (rdmsrl_safe(msr, &val_old))
+ return false;
+
+ /*
+ * Only change the bits which can be updated by wrmsrl.
+ */
+ val_tmp = val_old ^ mask;
+ if (wrmsrl_safe(msr, val_tmp) ||
+ rdmsrl_safe(msr, &val_new))
+ return false;
+
+ if (val_new != val_tmp)
+ return false;
+
+ /*
+ * The MSR can be safely accessed. Restore the old value and return.
+ */
+ wrmsrl(msr, val_old);
+
+ return true;
+}
+
static __init void intel_sandybridge_quirk(void)
{
x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
- int version;
+ struct extra_reg *er;
+ int version, i;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ /* dTLB-load-misses on IVB is different than SNB */
+ hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
@@ -2565,6 +2613,34 @@
}
}
+ /*
+ * Accessing LBR MSRs may cause a #GP under certain circumstances,
+ * e.g. KVM doesn't support the LBR MSRs.
+ * Check all LBR MSRs here.
+ * Disable LBR access if any LBR MSR cannot be accessed.
+ */
+ if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ x86_pmu.lbr_nr = 0;
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+ check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+ x86_pmu.lbr_nr = 0;
+ }
+
+ /*
+ * Accessing an extra MSR may cause a #GP under certain circumstances,
+ * e.g. KVM doesn't support offcore events.
+ * Check all extra_regs here.
+ */
+ if (x86_pmu.extra_regs) {
+ for (er = x86_pmu.extra_regs; er->msr; er++) {
+ er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+ /* Disable LBR select mapping */
+ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+ x86_pmu.lbr_sel_map = NULL;
+ }
+ }
+
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970c..696ade3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@
if (!x86_pmu.bts)
return 0;
- buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
- if (unlikely(!buffer))
+ buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+ if (unlikely(!buffer)) {
+ WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
return -ENOMEM;
+ }
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea..ae6552a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@
SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e..0d0c9d4 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
- movl %eax,PT_EAX(%esp)
sysenter_after_call:
+ movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@@ -502,6 +502,7 @@
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
+syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@
END(syscall_fault)
syscall_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
- jmp syscall_exit
+ movl $-ENOSYS,%eax
+ jmp syscall_after_call
END(syscall_badsys)
sysenter_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
+ movl $-ENOSYS,%eax
jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16..94d857f 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@
if (!pud_present(pud)) {
pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
- paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+ paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PUD_CLONES; n++)
set_pud(&pud_p[n], pud);
}
@@ -185,7 +185,7 @@
if (!pmd_present(pmd)) {
pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
- paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+ paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PMD_CLONES; n++)
set_pmd(&pmd_p[n], pmd);
}
@@ -193,7 +193,6 @@
pte_p = pte_offset_kernel(&pmd, addr);
stack_page = (void *)__get_free_page(GFP_KERNEL);
pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
- paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df6..67e6d19e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ if (user_mode_vm(regs))
+ return 0;
+
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce1..ea03031 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@
tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
mark_tsc_unstable("cpufreq changes");
- }
- set_cyc2ns_scale(tsc_khz, freq->cpu);
+ set_cyc2ns_scale(tsc_khz, freq->cpu);
+ }
return 0;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f644933..ef432f8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@
kvm_x86_ops->set_nmi(vcpu);
}
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+ /*
+ * Because interrupts can be injected asynchronously, we are
+ * calling check_nested_events again here to avoid a race condition.
+ * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+ * proposal and current concerns. Perhaps we should be setting
+ * KVM_REQ_EVENT only on certain events and not unconditionally?
+ */
+ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+ r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+ if (r != 0)
+ return r;
+ }
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec3..8453e6e 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@
beqz a2, 1f # if at start of vector, don't restore
addi a0, a0, -128
- bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
- bbsi a0, 7, 2f
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
+
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ bbsi.l a0, 7, 2f
/*
* Restore a0 as saved by _WindowOverflow8().
- *
- * FIXME: we really need a fixup handler for this L32E,
- * for the extremely unlikely case where the overflow handler's
- * reference thru a0 gets a hardware TLB refill that bumps out
- * the (distinct, aliasing) TLB entry that mapped its prior
- * references thru a9, and where our reference now thru a9
- * gets a 2nd-level miss exception (not hardware TLB refill).
*/
- l32e a2, a9, -16
- wsr a2, depc # replace the saved a0
- j 1f
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f
2:
/*
* Restore a0 as saved by _WindowOverflow12().
- *
- * FIXME: we really need a fixup handler for this L32E,
- * for the extremely unlikely case where the overflow handler's
- * reference thru a0 gets a hardware TLB refill that bumps out
- * the (distinct, aliasing) TLB entry that mapped its prior
- * references thru a13, and where our reference now thru a13
- * gets a 2nd-level miss exception (not hardware TLB refill).
*/
- l32e a2, a13, -16
- wsr a2, depc # replace the saved a0
+ l32e a0, a13, -16
+ wsr a0, depc # replace the saved a0
+3:
+ xsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1:
/*
* Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@
s32i a0, a2, PT_DEPC
+_DoubleExceptionVector_handle_exception:
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
xsr a3, excsave1
@@ -464,11 +469,120 @@
rotw -3
j 1b
- .end literal_prefix
ENDPROC(_DoubleExceptionVector)
/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ * (we'll need to rotate window back and there's no place to save this
+ * information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ * about the conditions of the original double exception that happened in
+ * the window overflow handler is lost, so we just return to userspace to
+ * retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ _beqi a0, 1, .Lhandle_1
+ _beqi a0, 3, .Lhandle_3
+
+ .macro overflow_fixup_handle_exception_pane n
+
+ rsr a0, depc
+ rotw -\n
+
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+
+ movi a0, .Lrestore_\n
+ s32i a0, a2, PT_DEPC
+ rsr a0, exccause
+ j _DoubleExceptionVector_handle_exception
+
+ .endm
+
+ overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+ overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+ overflow_fixup_handle_exception_pane 3
+
+ .macro overflow_fixup_restore_a0_pane n
+
+ rotw \n
+ /* Need to preserve a0 value here to be able to handle exception
+ * that may occur on a0 reload from stack. It may occur because
+ * TLB miss handler may not be atomic and pointer to page table
+ * may be lost before we get here. There are no free registers,
+ * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+ */
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ bbsi.l a0, 7, 1f
+ l32e a0, a9, -16
+ j 2f
+1:
+ l32e a0, a13, -16
+2:
+ rotw -\n
+
+ .endm
+
+.Lrestore_2:
+ overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, 0
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ rfe
+
+.Lrestore_1:
+ overflow_fixup_restore_a0_pane 1
+ j .Lset_default_fixup
+.Lrestore_3:
+ overflow_fixup_restore_a0_pane 3
+ j .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+ .end literal_prefix
+/*
* Debug interrupt vector
*
* There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c00..d16db6d 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- DOUBLEEXC_VECTOR_VADDR - 16,
+ DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
- 32,
+ 40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256..77ed202 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@
return -EINVAL;
}
- if (it && start - it->start < bank_sz) {
+ if (it && start - it->start <= bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc4..28d227c 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@
{
lockdep_assert_held(q->queue_lock);
+ /*
+ * @q could be exiting and already have destroyed all blkgs as
+ * indicated by NULL root_blkg. If so, don't confuse policies.
+ */
+ if (!q->root_blkg)
+ return;
+
blk_throtl_drain(q);
}
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d86..a185b86 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@
EXPORT_SYMBOL(blk_queue_find_tag);
/**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * Tries to free the specified @bqt. Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and free it when the last reference
+ * is dropped.
*/
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
{
- int retval;
-
- retval = atomic_dec_and_test(&bqt->refcnt);
- if (retval) {
+ if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
@@ -50,9 +47,8 @@
kfree(bqt);
}
-
- return retval;
}
+EXPORT_SYMBOL(blk_free_tags);
/**
* __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@
if (!bqt)
return;
- __blk_free_tags(bqt);
+ blk_free_tags(bqt);
q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * For externally managed @bqt frees the map. Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
- if (unlikely(!__blk_free_tags(bqt)))
- BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
*
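
The rework above turns blk_free_tags() into a plain reference drop rather than a must-be-last-user call. As a rough illustration of the intended usage (the driver, queue, and depth below are hypothetical, and blk_init_tags()/blk_queue_init_tags() are assumed from the same file):

/*
 * Hedged sketch of an externally managed tag map: a hypothetical driver
 * allocates one map with blk_init_tags() (which leaves it holding a
 * reference) and later drops that reference with blk_free_tags(); the map
 * is actually freed only once any queues using it have dropped theirs too.
 */
#include <linux/blkdev.h>

static struct blk_queue_tag *example_tags;

static int example_init(struct request_queue *q)
{
	example_tags = blk_init_tags(32);
	if (!example_tags)
		return -ENOMEM;

	/* Attach the shared map to the queue. */
	return blk_queue_init_tags(q, 32, example_tags);
}

static void example_exit(void)
{
	/* Drop our reference; the last user to let go frees the map. */
	blk_free_tags(example_tags);
}
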
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67..a0926a6 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@
case BLKROSET:
case BLKDISCARD:
case BLKSECDISCARD:
+ case BLKZEROOUT:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us
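
With BLKZEROOUT added to the compat pass-through list, a 32-bit program can issue it against a 64-bit kernel just like a native one. A minimal userspace sketch, with a placeholder device path and range values:

/*
 * Hedged userspace sketch: zero out 1 MiB starting at offset 0 of a block
 * device via BLKZEROOUT; the device node and range are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKZEROOUT */

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* start, length in bytes */
	int fd = open("/dev/sdX", O_WRONLY);	/* placeholder device node */

	if (fd < 0 || ioctl(fd, BLKZEROOUT, range) < 0) {
		perror("BLKZEROOUT");
		return 1;
	}
	close(fd);
	return 0;
}
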
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index a34a228..3f5f745 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -42,6 +42,12 @@
if ACPI
+config ACPI_LEGACY_TABLES_LOOKUP
+ bool
+
+config ARCH_MIGHT_HAVE_ACPI_PDC
+ bool
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ea55e01..505d4d7 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -36,6 +36,7 @@
acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
+acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 1853341..340d095 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -69,11 +69,11 @@
#define ELOG_ENTRY_ADDR(phyaddr) \
(phyaddr - elog_base + (u8 *)elog_addr)
-static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
+static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
{
int idx;
u64 data;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
WARN_ON(cpu < 0);
idx = ELOG_IDX(cpu, bank);
@@ -82,7 +82,7 @@
return NULL;
data &= EXT_ELOG_ENTRY_MASK;
- estatus = (struct acpi_generic_status *)ELOG_ENTRY_ADDR(data);
+ estatus = (struct acpi_hest_generic_status *)ELOG_ENTRY_ADDR(data);
/* if no valid data in elog entry, just return */
if (estatus->block_status == 0)
@@ -92,7 +92,7 @@
}
static void __print_extlog_rcd(const char *pfx,
- struct acpi_generic_status *estatus, int cpu)
+ struct acpi_hest_generic_status *estatus, int cpu)
{
static atomic_t seqno;
unsigned int curr_seqno;
@@ -111,7 +111,7 @@
}
static int print_extlog_rcd(const char *pfx,
- struct acpi_generic_status *estatus, int cpu)
+ struct acpi_hest_generic_status *estatus, int cpu)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -137,7 +137,7 @@
struct mce *mce = (struct mce *)data;
int bank = mce->bank;
int cpu = mce->extcpu;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
int rc;
estatus = extlog_elog_entry_check(cpu, bank);
@@ -148,7 +148,7 @@
/* clear record status to enable BIOS to update it again */
estatus->block_status = 0;
- rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
+ rc = print_extlog_rcd(NULL, (struct acpi_hest_generic_status *)elog_buf, cpu);
return NOTIFY_STOP;
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1c08574..1fdf5e0 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -268,7 +268,7 @@
pr->apic_id = apic_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
- if (!cpu0_initialized && !acpi_lapic) {
+ if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if ((cpu_index == -1) && (num_online_cpus() == 1))
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 8bb43f0..4be4cc94 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -2,7 +2,7 @@
# Makefile for ACPICA Core interpreter
#
-ccflags-y := -Os
+ccflags-y := -Os -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
@@ -175,5 +175,5 @@
utxferror.o \
utxfmutex.o
-acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o
+acpi-$(ACPI_FUTURE_USAGE) += utfileio.o utprint.o uttrack.o utcache.o
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 8698ffb..3d2c882 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -79,10 +79,13 @@
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
- printf ("Usage: %s\nOptions:\n", usage);
+ acpi_os_printf ("Usage: %s\nOptions:\n", usage);
+
+#define ACPI_USAGE_TEXT(description) \
+ acpi_os_printf (description);
#define ACPI_OPTION(name, description) \
- printf (" %-18s%s\n", name, description);
+ acpi_os_printf (" %-18s%s\n", name, description);
#define FILE_SUFFIX_DISASSEMBLY "dsl"
#define ACPI_TABLE_FILE_SUFFIX ".dat"
@@ -102,7 +105,7 @@
/*
* cmfsize - Common get file size function
*/
-u32 cm_get_file_size(FILE * file);
+u32 cm_get_file_size(ACPI_FILE file);
#ifndef ACPI_DUMP_APP
/*
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 68a91eb..1d026ff 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -233,9 +233,6 @@
acpi_status
acpi_db_get_table_from_file(char *filename, struct acpi_table_header **table);
-acpi_status
-acpi_db_read_table_from_file(char *filename, struct acpi_table_header **table);
-
/*
* dbhistry - debugger HISTORY command
*/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 115eedc..ebf02cc 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -297,7 +297,7 @@
*
****************************************************************************/
-ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_output_flags, ACPI_DB_CONSOLE_OUTPUT);
#ifdef ACPI_DISASSEMBLER
@@ -362,6 +362,12 @@
#ifdef ACPI_APPLICATION
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
+
+/* Print buffer */
+
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
+ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
#endif /* ACPI_APPLICATION */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 1e256c5..ed614f4 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -95,7 +95,6 @@
#ifdef ACPI_ASL_COMPILER
#include <stdio.h>
-extern FILE *acpi_gbl_output_file;
#define ACPI_MSG_REDIRECT_BEGIN \
FILE *output_file = acpi_gbl_output_file; \
@@ -211,6 +210,8 @@
acpi_size acpi_ut_strlen(const char *string);
+char *acpi_ut_strchr(const char *string, int ch);
+
char *acpi_ut_strcpy(char *dst_string, const char *src_string);
char *acpi_ut_strncpy(char *dst_string,
@@ -257,7 +258,7 @@
#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
#define ACPI_IS_UPPER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
#define ACPI_IS_LOWER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
-#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
+#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_XS | _ACPI_PU))
#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
#endif /* !ACPI_USE_SYSTEM_CLIBRARY */
@@ -352,6 +353,13 @@
void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset);
+#ifdef ACPI_APPLICATION
+void
+acpi_ut_dump_buffer_to_file(ACPI_FILE file,
+ u8 *buffer,
+ u32 count, u32 display, u32 base_offset);
+#endif
+
void acpi_ut_report_error(char *module_name, u32 line_number);
void acpi_ut_report_info(char *module_name, u32 line_number);
@@ -394,6 +402,14 @@
u8 method_count, u8 *out_values);
/*
+ * utfileio - file operations
+ */
+#ifdef ACPI_APPLICATION
+acpi_status
+acpi_ut_read_table_from_file(char *filename, struct acpi_table_header **table);
+#endif
+
+/*
* utids - device ID support
*/
acpi_status
@@ -743,4 +759,23 @@
const struct ah_device_id *acpi_ah_match_hardware_id(char *hid);
+/*
+ * utprint - printf/vprintf output functions
+ */
+const char *acpi_ut_scan_number(const char *string, u64 *number_ptr);
+
+const char *acpi_ut_print_number(char *string, u64 number);
+
+int
+acpi_ut_vsnprintf(char *string,
+ acpi_size size, const char *format, va_list args);
+
+int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...);
+
+#ifdef ACPI_APPLICATION
+int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args);
+
+int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...);
+#endif
+
#endif /* _ACUTILS_H */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 48f7001..e4ba4de 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -698,21 +698,6 @@
}
/*
- * If edge-triggered, clear the GPE status bit now. Note that
- * level-triggered events are cleared after the GPE is serviced.
- */
- if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
- ACPI_GPE_EDGE_TRIGGERED) {
- status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE %02X",
- gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
- }
- }
-
- /*
* Always disable the GPE so that it does not keep firing before
* any asynchronous activity completes (either from the execution
* of a GPE method or an asynchronous GPE handler.)
@@ -729,6 +714,23 @@
}
/*
+ * If edge-triggered, clear the GPE status bit now. Note that
+ * level-triggered events are cleared after the GPE is serviced.
+ */
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_EDGE_TRIGGERED) {
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to clear GPE %02X",
+ gpe_number));
+ (void)acpi_hw_low_set_gpe(gpe_event_info,
+ ACPI_GPE_CONDITIONAL_ENABLE);
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ }
+
+ /*
* Dispatch the GPE to either an installed handler or the control
* method associated with this GPE (_Lxx or _Exx). If a handler
* exists, we invoke it and do not attempt to run the method.
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index cb534fa..0cf159c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -126,11 +126,19 @@
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- /* Ensure that we have a valid GPE number */
-
+ /*
+ * Ensure that we have a valid GPE number and that there is some way
+ * of handling the GPE (handler or a GPE method). In other words, we
+ * won't allow a valid GPE to be enabled if there is no way to handle it.
+ */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (gpe_event_info) {
- status = acpi_ev_add_gpe_reference(gpe_event_info);
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+ ACPI_GPE_DISPATCH_NONE) {
+ status = acpi_ev_add_gpe_reference(gpe_event_info);
+ } else {
+ status = AE_NO_HANDLER;
+ }
}
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
@@ -179,6 +187,53 @@
/*******************************************************************************
*
+ * FUNCTION: acpi_mark_gpe_for_wake
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. Simply
+ * sets the ACPI_GPE_CAN_WAKE flag.
+ *
+ * Some potential callers of acpi_setup_gpe_for_wake may know in advance that
+ * there won't be any notify handlers installed for device wake notifications
+ * from the given GPE (one example is a button GPE in Linux). For these cases,
+ * acpi_mark_gpe_for_wake should be used instead of acpi_setup_gpe_for_wake.
+ * This will set the ACPI_GPE_CAN_WAKE flag for the GPE without trying to
+ * set up implicit wake notification for it (since there's no handler method).
+ *
+ ******************************************************************************/
+acpi_status acpi_mark_gpe_for_wake(acpi_handle gpe_device, u32 gpe_number)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status = AE_BAD_PARAMETER;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_mark_gpe_for_wake);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (gpe_event_info) {
+
+ /* Mark the GPE as a possible wake event */
+
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ status = AE_OK;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_mark_gpe_for_wake)
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_setup_gpe_for_wake
*
* PARAMETERS: wake_device - Device associated with the GPE (via _PRW)
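
acpi_mark_gpe_for_wake() introduced above only sets ACPI_GPE_CAN_WAKE, so it suits callers that know no notify handler will ever be installed for the GPE. A hedged sketch of such a caller (the function name and the _PRW-derived arguments are hypothetical):

/*
 * Hedged sketch: a driver that handles its own wake events (e.g. a button)
 * marks the GPE wake-capable without setting up implicit notification.
 * example_mark_wake() and its arguments are illustrative only.
 */
#include <linux/errno.h>
#include <linux/acpi.h>

static int example_mark_wake(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	status = acpi_mark_gpe_for_wake(gpe_device, gpe_number);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* The core wakeup code now treats this GPE as wake-capable. */
	return 0;
}
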
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 12878e1..1ff42c0 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -56,7 +56,7 @@
/*******************************************************************************
*
- * FUNCTION: acpi_get_serial_access_bytes
+ * FUNCTION: acpi_ex_get_serial_access_length
*
* PARAMETERS: accessor_type - The type of the protocol indicated by region
* field access attributes
@@ -103,7 +103,7 @@
case AML_FIELD_ATTRIB_BLOCK_CALL:
default:
- length = ACPI_GSBUS_BUFFER_SIZE;
+ length = ACPI_GSBUS_BUFFER_SIZE - 2;
break;
}
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index e0fd9b4..a4c34d2 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -278,8 +278,9 @@
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
goto exit;
+ }
/* Clear the GPE Bits in all GPE registers in all GPE blocks */
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index fe54a8c..a42ee9d 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -237,6 +237,16 @@
(node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
node->object = node->object->common.next_object;
}
+
+ /*
+ * Detach the object from any data objects (which are still held by
+ * the namespace node)
+ */
+ if (obj_desc->common.next_object &&
+ ((obj_desc->common.next_object)->common.type ==
+ ACPI_TYPE_LOCAL_DATA)) {
+ obj_desc->common.next_object = NULL;
+ }
}
/* Reset the node type to untyped */
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 3c16997..038ea88 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -199,3 +199,131 @@
acpi_ut_dump_buffer(buffer, count, display, 0);
}
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_buffer_to_file
+ *
+ * PARAMETERS: file - File descriptor
+ * buffer - Buffer to dump
+ * count - Amount to dump, in bytes
+ * display - BYTE, WORD, DWORD, or QWORD display:
+ * DB_BYTE_DISPLAY
+ * DB_WORD_DISPLAY
+ * DB_DWORD_DISPLAY
+ * DB_QWORD_DISPLAY
+ * base_offset - Beginning buffer offset (display only)
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Generic routine to dump a buffer in both hex and ASCII to a file.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_dump_buffer_to_file(ACPI_FILE file,
+ u8 *buffer, u32 count, u32 display, u32 base_offset)
+{
+ u32 i = 0;
+ u32 j;
+ u32 temp32;
+ u8 buf_char;
+
+ if (!buffer) {
+ acpi_ut_file_printf(file,
+ "Null Buffer Pointer in DumpBuffer!\n");
+ return;
+ }
+
+ if ((count < 4) || (count & 0x01)) {
+ display = DB_BYTE_DISPLAY;
+ }
+
+ /* Nasty little dump buffer routine! */
+
+ while (i < count) {
+
+ /* Print current offset */
+
+ acpi_ut_file_printf(file, "%6.4X: ", (base_offset + i));
+
+ /* Print 16 hex chars */
+
+ for (j = 0; j < 16;) {
+ if (i + j >= count) {
+
+ /* Dump fill spaces */
+
+ acpi_ut_file_printf(file, "%*s",
+ ((display * 2) + 1), " ");
+ j += display;
+ continue;
+ }
+
+ switch (display) {
+ case DB_BYTE_DISPLAY:
+ default: /* Default is BYTE display */
+
+ acpi_ut_file_printf(file, "%02X ",
+ buffer[(acpi_size) i + j]);
+ break;
+
+ case DB_WORD_DISPLAY:
+
+ ACPI_MOVE_16_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_ut_file_printf(file, "%04X ", temp32);
+ break;
+
+ case DB_DWORD_DISPLAY:
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_ut_file_printf(file, "%08X ", temp32);
+ break;
+
+ case DB_QWORD_DISPLAY:
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_ut_file_printf(file, "%08X", temp32);
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j +
+ 4]);
+ acpi_ut_file_printf(file, "%08X ", temp32);
+ break;
+ }
+
+ j += display;
+ }
+
+ /*
+ * Print the ASCII equivalent characters but watch out for the bad
+ * unprintable ones (printable chars are 0x20 through 0x7E)
+ */
+ acpi_ut_file_printf(file, " ");
+ for (j = 0; j < 16; j++) {
+ if (i + j >= count) {
+ acpi_ut_file_printf(file, "\n");
+ return;
+ }
+
+ buf_char = buffer[(acpi_size) i + j];
+ if (ACPI_IS_PRINT(buf_char)) {
+ acpi_ut_file_printf(file, "%c", buf_char);
+ } else {
+ acpi_ut_file_printf(file, ".");
+ }
+ }
+
+ /* Done with that line. */
+
+ acpi_ut_file_printf(file, "\n");
+ i += 16;
+ }
+
+ return;
+}
+#endif
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 270c164..ff601c0 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -1001,5 +1001,11 @@
status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
}
+ /* Delete the allocated object if copy failed */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(*dest_desc);
+ }
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 21a20ac..e516254 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -561,3 +561,29 @@
}
#endif
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_log_error
+ *
+ * PARAMETERS: format - Printf format field
+ * ... - Optional printf arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message to the console, used by applications.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ (void)acpi_ut_file_vprintf(ACPI_FILE_ERR, format, args);
+ va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_log_error)
+#endif
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
new file mode 100644
index 0000000..bdf9914
--- /dev/null
+++ b/drivers/acpi/acpica/utfileio.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ *
+ * Module Name: utfileio - simple file I/O routines
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+#include "acapps.h"
+
+#ifdef ACPI_ASL_COMPILER
+#include "aslcompiler.h"
+#endif
+
+#define _COMPONENT ACPI_CA_DEBUGGER
+ACPI_MODULE_NAME("utfileio")
+
+#ifdef ACPI_APPLICATION
+/* Local prototypes */
+static acpi_status
+acpi_ut_check_text_mode_corruption(u8 *table,
+ u32 table_length, u32 file_length);
+
+static acpi_status
+acpi_ut_read_table(FILE * fp,
+ struct acpi_table_header **table, u32 *table_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_text_mode_corruption
+ *
+ * PARAMETERS: table - Table buffer
+ * table_length - Length of table from the table header
+ * file_length - Length of the file that contains the table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check table for text mode file corruption where all linefeed
+ * characters (LF) have been replaced by carriage return linefeed
+ * pairs (CR/LF).
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_check_text_mode_corruption(u8 *table, u32 table_length, u32 file_length)
+{
+ u32 i;
+ u32 pairs = 0;
+
+ if (table_length != file_length) {
+ ACPI_WARNING((AE_INFO,
+ "File length (0x%X) is not the same as the table length (0x%X)",
+ file_length, table_length));
+ }
+
+ /* Scan entire table to determine if each LF has been prefixed with a CR */
+
+ for (i = 1; i < file_length; i++) {
+ if (table[i] == 0x0A) {
+ if (table[i - 1] != 0x0D) {
+
+ /* The LF does not have a preceding CR, table not corrupted */
+
+ return (AE_OK);
+ } else {
+ /* Found a CR/LF pair */
+
+ pairs++;
+ }
+ i++;
+ }
+ }
+
+ if (!pairs) {
+ return (AE_OK);
+ }
+
+ /*
+ * Entire table scanned, each CR is part of a CR/LF pair --
+ * meaning that the table was treated as a text file somewhere.
+ *
+ * NOTE: We can't "fix" the table, because any existing CR/LF pairs in the
+ * original table are left untouched by the text conversion process --
+ * meaning that we cannot simply replace CR/LF pairs with LFs.
+ */
+ acpi_os_printf("Table has been corrupted by text mode conversion\n");
+ acpi_os_printf("All LFs (%u) were changed to CR/LF pairs\n", pairs);
+ acpi_os_printf("Table cannot be repaired!\n");
+ return (AE_BAD_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_read_table
+ *
+ * PARAMETERS: fp - File that contains table
+ * table - Return value, buffer with table
+ * table_length - Return value, length of table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load an ACPI table from the file pointer
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_read_table(FILE * fp,
+ struct acpi_table_header **table, u32 *table_length)
+{
+ struct acpi_table_header table_header;
+ u32 actual;
+ acpi_status status;
+ u32 file_size;
+ u8 standard_header = TRUE;
+ s32 count;
+
+ /* Get the file size */
+
+ file_size = cm_get_file_size(fp);
+ if (file_size == ACPI_UINT32_MAX) {
+ return (AE_ERROR);
+ }
+
+ if (file_size < 4) {
+ return (AE_BAD_HEADER);
+ }
+
+ /* Read the signature */
+
+ fseek(fp, 0, SEEK_SET);
+
+ count = fread(&table_header, 1, sizeof(struct acpi_table_header), fp);
+ if (count != sizeof(struct acpi_table_header)) {
+ acpi_os_printf("Could not read the table header\n");
+ return (AE_BAD_HEADER);
+ }
+
+ /* The RSDP table does not have standard ACPI header */
+
+ if (ACPI_VALIDATE_RSDP_SIG(table_header.signature)) {
+ *table_length = file_size;
+ standard_header = FALSE;
+ } else {
+
+#if 0
+ /* Validate the table header/length */
+
+ status = acpi_tb_validate_table_header(&table_header);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Table header is invalid!\n");
+ return (status);
+ }
+#endif
+
+ /* File size must be at least as long as the Header-specified length */
+
+ if (table_header.length > file_size) {
+ acpi_os_printf
+ ("TableHeader length [0x%X] greater than the input file size [0x%X]\n",
+ table_header.length, file_size);
+
+#ifdef ACPI_ASL_COMPILER
+ status = fl_check_for_ascii(fp, NULL, FALSE);
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf
+ ("File appears to be ASCII only, must be binary\n",
+ table_header.length, file_size);
+ }
+#endif
+ return (AE_BAD_HEADER);
+ }
+#ifdef ACPI_OBSOLETE_CODE
+ /* We only support a limited number of table types */
+
+ if (!ACPI_COMPARE_NAME
+ ((char *)table_header.signature, ACPI_SIG_DSDT)
+ && !ACPI_COMPARE_NAME((char *)table_header.signature,
+ ACPI_SIG_PSDT)
+ && !ACPI_COMPARE_NAME((char *)table_header.signature,
+ ACPI_SIG_SSDT)) {
+ acpi_os_printf
+ ("Table signature [%4.4s] is invalid or not supported\n",
+ (char *)table_header.signature);
+ ACPI_DUMP_BUFFER(&table_header,
+ sizeof(struct acpi_table_header));
+ return (AE_ERROR);
+ }
+#endif
+
+ *table_length = table_header.length;
+ }
+
+ /* Allocate a buffer for the table */
+
+ *table = acpi_os_allocate((size_t) file_size);
+ if (!*table) {
+ acpi_os_printf
+ ("Could not allocate memory for ACPI table %4.4s (size=0x%X)\n",
+ table_header.signature, *table_length);
+ return (AE_NO_MEMORY);
+ }
+
+ /* Get the rest of the table */
+
+ fseek(fp, 0, SEEK_SET);
+ actual = fread(*table, 1, (size_t) file_size, fp);
+ if (actual == file_size) {
+ if (standard_header) {
+
+ /* Now validate the checksum */
+
+ status = acpi_tb_verify_checksum((void *)*table,
+ ACPI_CAST_PTR(struct
+ acpi_table_header,
+ *table)->
+ length);
+
+ if (status == AE_BAD_CHECKSUM) {
+ status =
+ acpi_ut_check_text_mode_corruption((u8 *)
+ *table,
+ file_size,
+ (*table)->
+ length);
+ return (status);
+ }
+ }
+ return (AE_OK);
+ }
+
+ if (actual > 0) {
+ acpi_os_printf("Warning - reading table, asked for %X got %X\n",
+ file_size, actual);
+ return (AE_OK);
+ }
+
+ acpi_os_printf("Error - could not read the table file\n");
+ acpi_os_free(*table);
+ *table = NULL;
+ *table_length = 0;
+ return (AE_ERROR);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_read_table_from_file
+ *
+ * PARAMETERS: filename - File where table is located
+ * table - Where a pointer to the table is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get an ACPI table from a file
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
+{
+ FILE *file;
+ u32 file_size;
+ u32 table_length;
+ acpi_status status = AE_ERROR;
+
+ /* Open the file, get current size */
+
+ file = fopen(filename, "rb");
+ if (!file) {
+ perror("Could not open input file");
+ return (status);
+ }
+
+ file_size = cm_get_file_size(file);
+ if (file_size == ACPI_UINT32_MAX) {
+ goto exit;
+ }
+
+ /* Get the entire file */
+
+ fprintf(stderr,
+ "Loading Acpi table from file %10s - Length %.8u (%06X)\n",
+ filename, file_size, file_size);
+
+ status = acpi_ut_read_table(file, table, &table_length);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not get table from the file\n");
+ }
+
+exit:
+ fclose(file);
+ return (status);
+}
+
+#endif
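
For context, a hedged sketch of how an ACPICA application might call the new acpi_ut_read_table_from_file() helper; the file name is illustrative and error handling is minimal:

/*
 * Hedged sketch for an ACPICA application build (ACPI_APPLICATION):
 * read a table image from disk, use it, then release the buffer.
 * example_load_table() and "dsdt.dat" are illustrative only.
 */
#ifdef ACPI_APPLICATION
static acpi_status example_load_table(void)
{
	struct acpi_table_header *table;
	acpi_status status;

	status = acpi_ut_read_table_from_file("dsdt.dat", &table);
	if (ACPI_FAILURE(status))
		return (status);

	/* ... hand the in-memory copy to the table manager here ... */

	acpi_os_free(table);
	return (AE_OK);
}
#endif
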
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index d69be3c..77ceac7 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -214,152 +214,6 @@
};
#endif /* !ACPI_REDUCED_HARDWARE */
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_init_globals
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
- * initialization should be initialized here. This allows for
- * a warm restart.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_init_globals(void)
-{
- acpi_status status;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ut_init_globals);
-
- /* Create all memory caches */
-
- status = acpi_ut_create_caches();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Address Range lists */
-
- for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
- acpi_gbl_address_range_list[i] = NULL;
- }
-
- /* Mutex locked flags */
-
- for (i = 0; i < ACPI_NUM_MUTEX; i++) {
- acpi_gbl_mutex_info[i].mutex = NULL;
- acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
- acpi_gbl_mutex_info[i].use_count = 0;
- }
-
- for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
- acpi_gbl_owner_id_mask[i] = 0;
- }
-
- /* Last owner_ID is never valid */
-
- acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
-
- /* Event counters */
-
- acpi_method_count = 0;
- acpi_sci_count = 0;
- acpi_gpe_count = 0;
-
- for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
- acpi_fixed_event_count[i] = 0;
- }
-
-#if (!ACPI_REDUCED_HARDWARE)
-
- /* GPE/SCI support */
-
- acpi_gbl_all_gpes_initialized = FALSE;
- acpi_gbl_gpe_xrupt_list_head = NULL;
- acpi_gbl_gpe_fadt_blocks[0] = NULL;
- acpi_gbl_gpe_fadt_blocks[1] = NULL;
- acpi_current_gpe_count = 0;
-
- acpi_gbl_global_event_handler = NULL;
- acpi_gbl_sci_handler_list = NULL;
-
-#endif /* !ACPI_REDUCED_HARDWARE */
-
- /* Global handlers */
-
- acpi_gbl_global_notify[0].handler = NULL;
- acpi_gbl_global_notify[1].handler = NULL;
- acpi_gbl_exception_handler = NULL;
- acpi_gbl_init_handler = NULL;
- acpi_gbl_table_handler = NULL;
- acpi_gbl_interface_handler = NULL;
-
- /* Global Lock support */
-
- acpi_gbl_global_lock_semaphore = NULL;
- acpi_gbl_global_lock_mutex = NULL;
- acpi_gbl_global_lock_acquired = FALSE;
- acpi_gbl_global_lock_handle = 0;
- acpi_gbl_global_lock_present = FALSE;
-
- /* Miscellaneous variables */
-
- acpi_gbl_DSDT = NULL;
- acpi_gbl_cm_single_step = FALSE;
- acpi_gbl_shutdown = FALSE;
- acpi_gbl_ns_lookup_count = 0;
- acpi_gbl_ps_find_count = 0;
- acpi_gbl_acpi_hardware_present = TRUE;
- acpi_gbl_last_owner_id_index = 0;
- acpi_gbl_next_owner_id_offset = 0;
- acpi_gbl_trace_dbg_level = 0;
- acpi_gbl_trace_dbg_layer = 0;
- acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
- acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
- acpi_gbl_osi_mutex = NULL;
- acpi_gbl_reg_methods_executed = FALSE;
-
- /* Hardware oriented */
-
- acpi_gbl_events_initialized = FALSE;
- acpi_gbl_system_awake_and_running = TRUE;
-
- /* Namespace */
-
- acpi_gbl_module_code_list = NULL;
- acpi_gbl_root_node = NULL;
- acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
- acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
- acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
- acpi_gbl_root_node_struct.parent = NULL;
- acpi_gbl_root_node_struct.child = NULL;
- acpi_gbl_root_node_struct.peer = NULL;
- acpi_gbl_root_node_struct.object = NULL;
-
-#ifdef ACPI_DISASSEMBLER
- acpi_gbl_external_list = NULL;
- acpi_gbl_num_external_methods = 0;
- acpi_gbl_resolved_external_methods = 0;
-#endif
-
-#ifdef ACPI_DEBUG_OUTPUT
- acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
-#endif
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
- acpi_gbl_display_final_mem_stats = FALSE;
- acpi_gbl_disable_mem_tracking = FALSE;
-#endif
-
- ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE);
-
- return_ACPI_STATUS(AE_OK);
-}
-
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 5f56fc4..77120ec 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -102,6 +102,151 @@
}
#endif /* !ACPI_REDUCED_HARDWARE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_init_globals
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
+ * initialization should be initialized here. This allows for
+ * a warm restart.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_init_globals(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_init_globals);
+
+ /* Create all memory caches */
+
+ status = acpi_ut_create_caches();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Address Range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+
+ /* Mutex locked flags */
+
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+ acpi_gbl_mutex_info[i].mutex = NULL;
+ acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+ acpi_gbl_mutex_info[i].use_count = 0;
+ }
+
+ for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
+ acpi_gbl_owner_id_mask[i] = 0;
+ }
+
+ /* Last owner_ID is never valid */
+
+ acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+
+ /* Event counters */
+
+ acpi_method_count = 0;
+ acpi_sci_count = 0;
+ acpi_gpe_count = 0;
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_fixed_event_count[i] = 0;
+ }
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+ /* GPE/SCI support */
+
+ acpi_gbl_all_gpes_initialized = FALSE;
+ acpi_gbl_gpe_xrupt_list_head = NULL;
+ acpi_gbl_gpe_fadt_blocks[0] = NULL;
+ acpi_gbl_gpe_fadt_blocks[1] = NULL;
+ acpi_current_gpe_count = 0;
+
+ acpi_gbl_global_event_handler = NULL;
+ acpi_gbl_sci_handler_list = NULL;
+
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+ /* Global handlers */
+
+ acpi_gbl_global_notify[0].handler = NULL;
+ acpi_gbl_global_notify[1].handler = NULL;
+ acpi_gbl_exception_handler = NULL;
+ acpi_gbl_init_handler = NULL;
+ acpi_gbl_table_handler = NULL;
+ acpi_gbl_interface_handler = NULL;
+
+ /* Global Lock support */
+
+ acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_mutex = NULL;
+ acpi_gbl_global_lock_acquired = FALSE;
+ acpi_gbl_global_lock_handle = 0;
+ acpi_gbl_global_lock_present = FALSE;
+
+ /* Miscellaneous variables */
+
+ acpi_gbl_DSDT = NULL;
+ acpi_gbl_cm_single_step = FALSE;
+ acpi_gbl_shutdown = FALSE;
+ acpi_gbl_ns_lookup_count = 0;
+ acpi_gbl_ps_find_count = 0;
+ acpi_gbl_acpi_hardware_present = TRUE;
+ acpi_gbl_last_owner_id_index = 0;
+ acpi_gbl_next_owner_id_offset = 0;
+ acpi_gbl_trace_dbg_level = 0;
+ acpi_gbl_trace_dbg_layer = 0;
+ acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
+ acpi_gbl_osi_mutex = NULL;
+ acpi_gbl_reg_methods_executed = FALSE;
+
+ /* Hardware oriented */
+
+ acpi_gbl_events_initialized = FALSE;
+ acpi_gbl_system_awake_and_running = TRUE;
+
+ /* Namespace */
+
+ acpi_gbl_module_code_list = NULL;
+ acpi_gbl_root_node = NULL;
+ acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
+ acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
+ acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
+ acpi_gbl_root_node_struct.parent = NULL;
+ acpi_gbl_root_node_struct.child = NULL;
+ acpi_gbl_root_node_struct.peer = NULL;
+ acpi_gbl_root_node_struct.object = NULL;
+
+#ifdef ACPI_DISASSEMBLER
+ acpi_gbl_external_list = NULL;
+ acpi_gbl_num_external_methods = 0;
+ acpi_gbl_resolved_external_methods = 0;
+#endif
+
+#ifdef ACPI_DEBUG_OUTPUT
+ acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
+#endif
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ acpi_gbl_display_final_mem_stats = FALSE;
+ acpi_gbl_disable_mem_tracking = FALSE;
+#endif
+
+ ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
/******************************************************************************
*
* FUNCTION: acpi_ut_terminate
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
new file mode 100644
index 0000000..1031164
--- /dev/null
+++ b/drivers/acpi/acpica/utprint.c
@@ -0,0 +1,661 @@
+/******************************************************************************
+ *
+ * Module Name: utprint - Formatted printing routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utprint")
+
+#define ACPI_FORMAT_SIGN 0x01
+#define ACPI_FORMAT_SIGN_PLUS 0x02
+#define ACPI_FORMAT_SIGN_PLUS_SPACE 0x04
+#define ACPI_FORMAT_ZERO 0x08
+#define ACPI_FORMAT_LEFT 0x10
+#define ACPI_FORMAT_UPPER 0x20
+#define ACPI_FORMAT_PREFIX 0x40
+/* Local prototypes */
+static acpi_size
+acpi_ut_bound_string_length(const char *string, acpi_size count);
+
+static char *acpi_ut_bound_string_output(char *string, const char *end, char c);
+
+static char *acpi_ut_format_number(char *string,
+ char *end,
+ u64 number,
+ u8 base, s32 width, s32 precision, u8 type);
+
+static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);
+
+/* Module globals */
+
+static const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
+static const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_bound_string_length
+ *
+ * PARAMETERS: string - String with boundary
+ * count - Boundary of the string
+ *
+ * RETURN: Length of the string. Less than or equal to Count.
+ *
+ * DESCRIPTION: Calculate the length of a string with boundary.
+ *
+ ******************************************************************************/
+
+static acpi_size
+acpi_ut_bound_string_length(const char *string, acpi_size count)
+{
+ u32 length = 0;
+
+ while (*string && count) {
+ length++;
+ string++;
+ count--;
+ }
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_bound_string_output
+ *
+ * PARAMETERS: string - String with boundary
+ * end - Boundary of the string
+ * c - Character to be output to the string
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Output a character into a string with boundary check.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_bound_string_output(char *string, const char *end, char c)
+{
+
+ if (string < end) {
+ *string = c;
+ }
+
+ ++string;
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_put_number
+ *
+ * PARAMETERS: string - Buffer to hold reverse-ordered string
+ * number - Integer to be converted
+ * base - Base of the integer
+ * upper - Whether or not to use uppercase digits
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Convert an integer into a string. Note that the string holds
+ * the digits in reverse order and has no terminating null.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper)
+{
+ const char *digits;
+ u64 digit_index;
+ char *pos;
+
+ pos = string;
+ digits = upper ? acpi_gbl_upper_hex_digits : acpi_gbl_lower_hex_digits;
+
+ if (number == 0) {
+ *(pos++) = '0';
+ } else {
+ while (number) {
+ (void)acpi_ut_divide(number, base, &number,
+ &digit_index);
+ *(pos++) = digits[digit_index];
+ }
+ }
+
+ /* *(Pos++) = '0'; */
+ return (pos);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_scan_number
+ *
+ * PARAMETERS: string - String buffer
+ * number_ptr - Where the number is returned
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Scan a string for a decimal integer.
+ *
+ ******************************************************************************/
+
+const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
+{
+ u64 number = 0;
+
+ while (ACPI_IS_DIGIT(*string)) {
+ number *= 10;
+ number += *(string++) - '0';
+ }
+
+ *number_ptr = number;
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_print_number
+ *
+ * PARAMETERS: string - String buffer
+ * number - The number to be converted
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Print a decimal integer into a string.
+ *
+ ******************************************************************************/
+
+const char *acpi_ut_print_number(char *string, u64 number)
+{
+ char ascii_string[20];
+ const char *pos1;
+ char *pos2;
+
+ pos1 = acpi_ut_put_number(ascii_string, number, 10, FALSE);
+ pos2 = string;
+
+ while (pos1 != ascii_string) {
+ *(pos2++) = *(--pos1);
+ }
+
+ *pos2 = 0;
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_format_number
+ *
+ * PARAMETERS: string - String buffer with boundary
+ * end - Boundary of the string
+ * number - The number to be converted
+ * base - Base of the integer
+ * width - Field width
+ * precision - Precision of the integer
+ * type - Special printing flags
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Print an integer into a string with any base and any precision.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_format_number(char *string,
+ char *end,
+ u64 number,
+ u8 base, s32 width, s32 precision, u8 type)
+{
+ char sign;
+ char zero;
+ u8 need_prefix;
+ u8 upper;
+ s32 i;
+ char reversed_string[66];
+
+ /* Parameter validation */
+
+ if (base < 2 || base > 16) {
+ return (NULL);
+ }
+
+ if (type & ACPI_FORMAT_LEFT) {
+ type &= ~ACPI_FORMAT_ZERO;
+ }
+
+ need_prefix = ((type & ACPI_FORMAT_PREFIX)
+ && base != 10) ? TRUE : FALSE;
+ upper = (type & ACPI_FORMAT_UPPER) ? TRUE : FALSE;
+ zero = (type & ACPI_FORMAT_ZERO) ? '0' : ' ';
+
+ /* Calculate size according to sign and prefix */
+
+ sign = '\0';
+ if (type & ACPI_FORMAT_SIGN) {
+ if ((s64) number < 0) {
+ sign = '-';
+ number = -(s64) number;
+ width--;
+ } else if (type & ACPI_FORMAT_SIGN_PLUS) {
+ sign = '+';
+ width--;
+ } else if (type & ACPI_FORMAT_SIGN_PLUS_SPACE) {
+ sign = ' ';
+ width--;
+ }
+ }
+ if (need_prefix) {
+ width--;
+ if (base == 16) {
+ width--;
+ }
+ }
+
+ /* Generate full string in reverse order */
+
+ i = ACPI_PTR_DIFF(acpi_ut_put_number
+ (reversed_string, number, base, upper),
+ reversed_string);
+
+ /* Printing 100 using %2d gives "100", not "00" */
+
+ if (i > precision) {
+ precision = i;
+ }
+
+ width -= precision;
+
+ /* Output the string */
+
+ if (!(type & (ACPI_FORMAT_ZERO | ACPI_FORMAT_LEFT))) {
+ while (--width >= 0) {
+ string = acpi_ut_bound_string_output(string, end, ' ');
+ }
+ }
+ if (sign) {
+ string = acpi_ut_bound_string_output(string, end, sign);
+ }
+ if (need_prefix) {
+ string = acpi_ut_bound_string_output(string, end, '0');
+ if (base == 16) {
+ string = acpi_ut_bound_string_output(string, end,
+ upper ? 'X' : 'x');
+ }
+ }
+ if (!(type & ACPI_FORMAT_LEFT)) {
+ while (--width >= 0) {
+ string = acpi_ut_bound_string_output(string, end, zero);
+ }
+ }
+
+ while (i <= --precision) {
+ string = acpi_ut_bound_string_output(string, end, '0');
+ }
+ while (--i >= 0) {
+ string = acpi_ut_bound_string_output(string, end,
+ reversed_string[i]);
+ }
+ while (--width >= 0) {
+ string = acpi_ut_bound_string_output(string, end, ' ');
+ }
+
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_vsnprintf
+ *
+ * PARAMETERS: string - String with boundary
+ * size - Boundary of the string
+ * format - Standard printf format
+ * args - Argument list
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a string using argument list pointer.
+ *
+ ******************************************************************************/
+
+int
+acpi_ut_vsnprintf(char *string,
+ acpi_size size, const char *format, va_list args)
+{
+ u8 base = 10;
+ u8 type = 0;
+ s32 width = -1;
+ s32 precision = -1;
+ char qualifier = 0;
+ u64 number;
+ char *pos;
+ char *end;
+ char c;
+ const char *s;
+ const void *p;
+ s32 length;
+ int i;
+
+ pos = string;
+ end = string + size;
+
+ for (; *format; ++format) {
+ if (*format != '%') {
+ pos = acpi_ut_bound_string_output(pos, end, *format);
+ continue;
+ }
+
+ /* Process sign */
+
+ do {
+ ++format;
+ if (*format == '#') {
+ type |= ACPI_FORMAT_PREFIX;
+ } else if (*format == '0') {
+ type |= ACPI_FORMAT_ZERO;
+ } else if (*format == '+') {
+ type |= ACPI_FORMAT_SIGN_PLUS;
+ } else if (*format == ' ') {
+ type |= ACPI_FORMAT_SIGN_PLUS_SPACE;
+ } else if (*format == '-') {
+ type |= ACPI_FORMAT_LEFT;
+ } else {
+ break;
+ }
+ } while (1);
+
+ /* Process width */
+
+ if (ACPI_IS_DIGIT(*format)) {
+ format = acpi_ut_scan_number(format, &number);
+ width = (s32) number;
+ } else if (*format == '*') {
+ ++format;
+ width = va_arg(args, int);
+ if (width < 0) {
+ width = -width;
+ type |= ACPI_FORMAT_LEFT;
+ }
+ }
+
+ /* Process precision */
+
+ if (*format == '.') {
+ ++format;
+ if (ACPI_IS_DIGIT(*format)) {
+ format = acpi_ut_scan_number(format, &number);
+ precision = (s32) number;
+ } else if (*format == '*') {
+ ++format;
+ precision = va_arg(args, int);
+ }
+ if (precision < 0) {
+ precision = 0;
+ }
+ }
+
+ /* Process qualifier */
+
+ if (*format == 'h' || *format == 'l' || *format == 'L') {
+ qualifier = *format;
+ ++format;
+
+ if (qualifier == 'l' && *format == 'l') {
+ qualifier = 'L';
+ ++format;
+ }
+ }
+
+ switch (*format) {
+ case '%':
+
+ pos = acpi_ut_bound_string_output(pos, end, '%');
+ continue;
+
+ case 'c':
+
+ if (!(type & ACPI_FORMAT_LEFT)) {
+ while (--width > 0) {
+ pos =
+ acpi_ut_bound_string_output(pos,
+ end,
+ ' ');
+ }
+ }
+
+ c = (char)va_arg(args, int);
+ pos = acpi_ut_bound_string_output(pos, end, c);
+
+ while (--width > 0) {
+ pos =
+ acpi_ut_bound_string_output(pos, end, ' ');
+ }
+ continue;
+
+ case 's':
+
+ s = va_arg(args, char *);
+ if (!s) {
+ s = "<NULL>";
+ }
+ length = acpi_ut_bound_string_length(s, precision);
+ if (!(type & ACPI_FORMAT_LEFT)) {
+ while (length < width--) {
+ pos =
+ acpi_ut_bound_string_output(pos,
+ end,
+ ' ');
+ }
+ }
+ for (i = 0; i < length; ++i) {
+ pos = acpi_ut_bound_string_output(pos, end, *s);
+ ++s;
+ }
+ while (length < width--) {
+ pos =
+ acpi_ut_bound_string_output(pos, end, ' ');
+ }
+ continue;
+
+ case 'o':
+
+ base = 8;
+ break;
+
+ case 'X':
+
+ type |= ACPI_FORMAT_UPPER;
+
+ case 'x':
+
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+
+ type |= ACPI_FORMAT_SIGN;
+
+ case 'u':
+
+ break;
+
+ case 'p':
+
+ if (width == -1) {
+ width = 2 * sizeof(void *);
+ type |= ACPI_FORMAT_ZERO;
+ }
+
+ p = va_arg(args, void *);
+ pos = acpi_ut_format_number(pos, end,
+ ACPI_TO_INTEGER(p), 16,
+ width, precision, type);
+ continue;
+
+ default:
+
+ pos = acpi_ut_bound_string_output(pos, end, '%');
+ if (*format) {
+ pos =
+ acpi_ut_bound_string_output(pos, end,
+ *format);
+ } else {
+ --format;
+ }
+ continue;
+ }
+
+ if (qualifier == 'L') {
+ number = va_arg(args, u64);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (s64) number;
+ }
+ } else if (qualifier == 'l') {
+ number = va_arg(args, unsigned long);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (s32) number;
+ }
+ } else if (qualifier == 'h') {
+ number = (u16)va_arg(args, int);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (s16) number;
+ }
+ } else {
+ number = va_arg(args, unsigned int);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (signed int)number;
+ }
+ }
+
+ pos = acpi_ut_format_number(pos, end, number, base,
+ width, precision, type);
+ }
+
+ if (size > 0) {
+ if (pos < end) {
+ *pos = '\0';
+ } else {
+ end[-1] = '\0';
+ }
+ }
+
+ return (ACPI_PTR_DIFF(pos, string));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_snprintf
+ *
+ * PARAMETERS: string - String with boundary
+ * size - Boundary of the string
+ * Format, ... - Standard printf format
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a string.
+ *
+ ******************************************************************************/
+
+int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...)
+{
+ va_list args;
+ int length;
+
+ va_start(args, format);
+ length = acpi_ut_vsnprintf(string, size, format, args);
+ va_end(args);
+
+ return (length);
+}
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_file_vprintf
+ *
+ * PARAMETERS: file - File descriptor
+ * format - Standard printf format
+ * args - Argument list
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a file using argument list pointer.
+ *
+ ******************************************************************************/
+
+int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args)
+{
+ acpi_cpu_flags flags;
+ int length;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_print_lock);
+ length = acpi_ut_vsnprintf(acpi_gbl_print_buffer,
+ sizeof(acpi_gbl_print_buffer), format, args);
+
+ (void)acpi_os_write_file(file, acpi_gbl_print_buffer, length, 1);
+ acpi_os_release_lock(acpi_gbl_print_lock, flags);
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_file_printf
+ *
+ * PARAMETERS: file - File descriptor
+ * Format, ... - Standard printf format
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a file.
+ *
+ ******************************************************************************/
+
+int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...)
+{
+ va_list args;
+ int length;
+
+ va_start(args, format);
+ length = acpi_ut_file_vprintf(file, format, args);
+ va_end(args);
+
+ return (length);
+}
+#endif
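
The formatter above follows the classic vsnprintf pattern: digits are generated into a small scratch buffer in reverse order, then copied out through a bounds-checked writer that keeps advancing the cursor even after the destination is full, so the return value reflects the length the output would have had. Below is a minimal userspace sketch of that technique (plain C with illustrative helper names, not the ACPICA code itself):

#include <stdio.h>
#include <stddef.h>

/* Bounds-checked single-character writer: the cursor keeps advancing even
 * when the buffer is full, so the caller can learn the required length. */
static char *bounded_putc(char *pos, const char *end, char c)
{
	if (pos < end)
		*pos = c;
	return pos + 1;
}

/* Format an unsigned value in the given base; digits are generated into a
 * scratch buffer in reverse order, then emitted through bounded_putc(). */
static size_t format_number(char *buf, size_t size,
			    unsigned long long num, unsigned int base)
{
	static const char digits[] = "0123456789abcdef";
	char rev[66];
	int i = 0;
	char *pos = buf;
	const char *end = buf + size;

	if (num == 0)
		rev[i++] = '0';
	while (num) {
		rev[i++] = digits[num % base];
		num /= base;
	}
	while (i > 0)
		pos = bounded_putc(pos, end, rev[--i]);

	if (size > 0)
		*((pos < end) ? pos : buf + size - 1) = '\0';

	return (size_t)(pos - buf);	/* length before any truncation */
}

int main(void)
{
	char buf[8];
	size_t n = format_number(buf, sizeof(buf), 0xdeadbeefULL, 16);

	printf("\"%s\" (needed %zu characters)\n", buf, n);
	return 0;
}

Run with the deliberately small 8-byte buffer it prints "deadbee" but reports 8, mirroring the truncation semantics of acpi_ut_vsnprintf() above.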
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index e5bcd91..16129c7 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -121,11 +121,11 @@
struct dentry *apei_get_debugfs_dir(void);
#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_generic_data *)(estatus + 1); \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
(void *)section - (void *)estatus < estatus->data_length; \
section = (void *)(section+1) + section->error_data_length)
-static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
+static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
{
if (estatus->raw_data_length)
return estatus->raw_data_offset + \
@@ -135,9 +135,9 @@
}
void cper_estatus_print(const char *pfx,
- const struct acpi_generic_status *estatus);
-int cper_estatus_check_header(const struct acpi_generic_status *estatus);
-int cper_estatus_check(const struct acpi_generic_status *estatus);
+ const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
int apei_osc_setup(void);
#endif
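
apei_estatus_for_each_section() walks a status block laid out as a fixed header followed by variable-length sections, each of which records its own payload size; the cursor simply advances by one section header plus that payload until the header's data_length is used up. Here is a self-contained sketch of the same walk over simplified stand-in structures (these are not the real acpi_hest_generic_* layouts):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the APEI structures: a status header followed by
 * variable-length sections, each carrying the size of its own payload. */
struct status_hdr {
	uint32_t data_length;		/* total bytes of sections that follow */
};

struct section_hdr {
	uint32_t error_data_length;	/* payload bytes after this header */
};

/* Step one header plus its payload at a time until data_length is consumed,
 * the same advance rule used by apei_estatus_for_each_section(). */
static void walk_sections(const struct status_hdr *estatus)
{
	const char *start = (const char *)(estatus + 1);
	const struct section_hdr *sec = (const struct section_hdr *)start;

	while ((size_t)((const char *)sec - start) < estatus->data_length) {
		printf("section with %u payload bytes\n",
		       (unsigned int)sec->error_data_length);
		sec = (const struct section_hdr *)((const char *)(sec + 1) +
						   sec->error_data_length);
	}
}

int main(void)
{
	uint32_t raw[6] = { 0 };	/* header + two section headers + 12 payload bytes */
	struct status_hdr *st = (struct status_hdr *)raw;
	struct section_hdr *s1 = (struct section_hdr *)(st + 1);
	struct section_hdr *s2 = (struct section_hdr *)((char *)(s1 + 1) + 4);

	s1->error_data_length = 4;
	s2->error_data_length = 8;
	st->data_length = sizeof(raw) - sizeof(*st);

	walk_sections(st);
	return 0;
}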
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index dab7cb7..7a38d14 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -74,13 +74,13 @@
#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
- ((struct acpi_generic_status *) \
+ ((struct acpi_hest_generic_status *) \
((struct ghes_estatus_cache *)(estatus_cache) + 1))
#define GHES_ESTATUS_NODE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node) \
- ((struct acpi_generic_status *) \
+ ((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
bool ghes_disable;
@@ -408,7 +408,7 @@
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
+static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
@@ -441,10 +441,10 @@
}
static void ghes_do_proc(struct ghes *ghes,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_data *gdata;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -498,7 +498,7 @@
static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
static atomic_t seqno;
unsigned int curr_seqno;
@@ -520,7 +520,7 @@
static int ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -542,13 +542,13 @@
* GHES error status reporting throttle, to report more kinds of
* errors, instead of just most frequently occurred errors.
*/
-static int ghes_estatus_cached(struct acpi_generic_status *estatus)
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
u32 len;
int i, cached = 0;
unsigned long long now;
struct ghes_estatus_cache *cache;
- struct acpi_generic_status *cache_estatus;
+ struct acpi_hest_generic_status *cache_estatus;
len = cper_estatus_len(estatus);
rcu_read_lock();
@@ -573,12 +573,12 @@
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
struct acpi_hest_generic *generic,
- struct acpi_generic_status *estatus)
+ struct acpi_hest_generic_status *estatus)
{
int alloced;
u32 len, cache_len;
struct ghes_estatus_cache *cache;
- struct acpi_generic_status *cache_estatus;
+ struct acpi_hest_generic_status *cache_estatus;
alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
@@ -621,7 +621,7 @@
static void ghes_estatus_cache_add(
struct acpi_hest_generic *generic,
- struct acpi_generic_status *estatus)
+ struct acpi_hest_generic_status *estatus)
{
int i, slot = -1, count;
unsigned long long now, duration, period, max_period = 0;
@@ -753,7 +753,7 @@
struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -786,7 +786,7 @@
struct llist_node *llnode;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -845,7 +845,7 @@
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
u32 len, node_len;
struct ghes_estatus_node *estatus_node;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
@@ -925,7 +925,7 @@
rc = -EIO;
if (generic->error_block_length <
- sizeof(struct acpi_generic_status)) {
+ sizeof(struct acpi_hest_generic_status)) {
pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
generic->error_block_length,
generic->header.source_id);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 3d8413d..36eb42e 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -247,75 +247,11 @@
},
/*
- * The following machines have broken backlight support when reporting
- * the Windows 2012 OSI, so disable it until their support is fixed.
+ * These machines will power on immediately after shutdown when
+ * reporting the Windows 2012 OSI.
*/
{
.callback = dmi_disable_osi_win8,
- .ident = "ASUS Zenbook Prime UX31A",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Acer Aspire V5-573G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Acer Aspire V5-572G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad T431s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad T430",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 7737",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c5bc8cf..8581f5b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -477,9 +477,6 @@
return 0;
}
-u8 acpi_gbl_permanent_mmap;
-
-
void __init acpi_early_init(void)
{
acpi_status status;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index db35594..6d5d183 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -79,11 +79,13 @@
static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
+static int acpi_button_suspend(struct device *dev);
static int acpi_button_resume(struct device *dev);
#else
+#define acpi_button_suspend NULL
#define acpi_button_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
+static SIMPLE_DEV_PM_OPS(acpi_button_pm, acpi_button_suspend, acpi_button_resume);
static struct acpi_driver acpi_button_driver = {
.name = "button",
@@ -102,6 +104,7 @@
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
+ bool suspended;
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@@ -293,15 +296,19 @@
if (button->type == ACPI_BUTTON_TYPE_LID) {
acpi_lid_send_state(device);
} else {
- int keycode = test_bit(KEY_SLEEP, input->keybit) ?
- KEY_SLEEP : KEY_POWER;
+ int keycode;
+ pm_wakeup_event(&device->dev, 0);
+ if (button->suspended)
+ break;
+
+ keycode = test_bit(KEY_SLEEP, input->keybit) ?
+ KEY_SLEEP : KEY_POWER;
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
- pm_wakeup_event(&device->dev, 0);
acpi_bus_generate_netlink_event(
device->pnp.device_class,
dev_name(&device->dev),
@@ -316,11 +323,21 @@
}
#ifdef CONFIG_PM_SLEEP
+static int acpi_button_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct acpi_button *button = acpi_driver_data(device);
+
+ button->suspended = true;
+ return 0;
+}
+
static int acpi_button_resume(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
+ button->suspended = false;
if (button->type == ACPI_BUTTON_TYPE_LID)
return acpi_lid_send_state(device);
return 0;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 49a5127..67075f8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -367,29 +367,61 @@
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
+static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
+{
+ struct acpi_device *adev;
+
+ if (val != ACPI_NOTIFY_DEVICE_WAKE)
+ return;
+
+ adev = acpi_bus_get_acpi_device(handle);
+ if (!adev)
+ return;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (adev->wakeup.flags.notifier_present) {
+ __pm_wakeup_event(adev->wakeup.ws, 0);
+ if (adev->wakeup.context.work.func)
+ queue_pm_work(&adev->wakeup.context.work);
+ }
+
+ mutex_unlock(&acpi_pm_notifier_lock);
+
+ acpi_bus_put_acpi_device(adev);
+}
+
/**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
+ * acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
+ * @adev: ACPI device to add the notify handler for.
+ * @dev: Device to generate a wakeup event for while handling the notification.
+ * @work_func: Work function to execute when handling the notification.
*
* NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
* PM wakeup events. For example, wakeup events may be generated for bridges
* if one of the devices below the bridge is signaling wakeup, even if the
* bridge itself doesn't have a wakeup GPE associated with it.
*/
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler, void *context)
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
+ void (*work_func)(struct work_struct *work))
{
acpi_status status = AE_ALREADY_EXISTS;
+ if (!dev && !work_func)
+ return AE_BAD_PARAMETER;
+
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
- status = acpi_install_notify_handler(adev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler, context);
+ adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+ adev->wakeup.context.dev = dev;
+ if (work_func)
+ INIT_WORK(&adev->wakeup.context.work, work_func);
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
@@ -404,8 +436,7 @@
* acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
* @adev: ACPI device to remove the notifier from.
*/
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler)
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
@@ -416,10 +447,17 @@
status = acpi_remove_notify_handler(adev->handle,
ACPI_SYSTEM_NOTIFY,
- handler);
+ acpi_pm_notify_handler);
if (ACPI_FAILURE(status))
goto out;
+ if (adev->wakeup.context.work.func) {
+ cancel_work_sync(&adev->wakeup.context.work);
+ adev->wakeup.context.work.func = NULL;
+ }
+ adev->wakeup.context.dev = NULL;
+ wakeup_source_unregister(adev->wakeup.ws);
+
adev->wakeup.flags.notifier_present = false;
out:
@@ -558,7 +596,6 @@
*/
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
- acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
int ret, d_min, d_max;
@@ -573,8 +610,9 @@
d_max_in = ACPI_STATE_D3_HOT;
}
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
@@ -600,26 +638,25 @@
}
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
-#ifdef CONFIG_PM_RUNTIME
/**
- * acpi_wakeup_device - Wakeup notification handler for ACPI devices.
- * @handle: ACPI handle of the device the notification is for.
- * @event: Type of the signaled event.
- * @context: Device corresponding to @handle.
+ * acpi_pm_notify_work_func - ACPI devices wakeup notification work function.
+ * @work: Work item to handle.
*/
-static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
+static void acpi_pm_notify_work_func(struct work_struct *work)
{
- struct device *dev = context;
+ struct device *dev;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) {
+ dev = container_of(work, struct acpi_device_wakeup_context, work)->dev;
+ if (dev) {
pm_wakeup_event(dev, 0);
pm_runtime_resume(dev);
}
}
/**
- * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device.
- * @adev: ACPI device to enable/disable the remote wakeup for.
+ * acpi_device_wakeup - Enable/disable wakeup functionality for device.
+ * @adev: ACPI device to enable/disable wakeup functionality for.
+ * @target_state: State the system is transitioning into.
* @enable: Whether to enable or disable the wakeup functionality.
*
* Enable/disable the GPE associated with @adev so that it can generate
@@ -629,7 +666,8 @@
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
-int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
+static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
+ bool enable)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
@@ -637,7 +675,7 @@
acpi_status res;
int error;
- error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0);
+ error = acpi_enable_wakeup_device_power(adev, target_state);
if (error)
return error;
@@ -653,6 +691,7 @@
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
/**
* acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
* @dev: Device to enable/disable the platform to wake up.
@@ -661,63 +700,42 @@
int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
{
struct acpi_device *adev;
- acpi_handle handle;
if (!device_run_wake(phys_dev))
return -EINVAL;
- handle = ACPI_HANDLE(phys_dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
- __func__);
+ adev = ACPI_COMPANION(phys_dev);
+ if (!adev) {
+ dev_dbg(phys_dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
- return __acpi_device_run_wake(adev, enable);
+ return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
}
EXPORT_SYMBOL(acpi_pm_device_run_wake);
-#else
-static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
- void *context) {}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
/**
- * __acpi_device_sleep_wake - Enable or disable device to wake up the system.
- * @dev: Device to enable/desible to wake up the system.
- * @target_state: System state the device is supposed to wake up from.
- * @enable: Whether to enable or disable @dev to wake up the system.
- */
-int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
- bool enable)
-{
- return enable ?
- acpi_enable_wakeup_device_power(adev, target_state) :
- acpi_disable_wakeup_device_power(adev);
-}
-
-/**
* acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
* @dev: Device to enable/desible to wake up the system from sleep states.
* @enable: Whether to enable or disable @dev to wake up the system.
*/
int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
- acpi_handle handle;
struct acpi_device *adev;
int error;
if (!device_can_wakeup(dev))
return -EINVAL;
- handle = ACPI_HANDLE(dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
- error = __acpi_device_sleep_wake(adev, acpi_target_system_state(),
- enable);
+ error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
if (!error)
dev_info(dev, "System wakeup %s by ACPI\n",
enable ? "enabled" : "disabled");
@@ -775,13 +793,13 @@
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
PM_QOS_FLAGS_NONE;
- error = __acpi_device_run_wake(adev, remote_wakeup);
+ error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
if (remote_wakeup && error)
return -EAGAIN;
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
if (error)
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
return error;
}
@@ -804,7 +822,7 @@
return 0;
error = acpi_dev_pm_full_power(adev);
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -860,13 +878,13 @@
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev);
- error = __acpi_device_sleep_wake(adev, target_state, wakeup);
+ error = acpi_device_wakeup(adev, target_state, wakeup);
if (wakeup && error)
return error;
error = acpi_dev_pm_low_power(dev, adev, target_state);
if (error)
- __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
return error;
}
@@ -889,7 +907,7 @@
return 0;
error = acpi_dev_pm_full_power(adev);
- __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1048,11 +1066,11 @@
if (dev->pm_domain)
return -EEXIST;
- acpi_add_pm_notifier(adev, acpi_wakeup_device, dev);
+ acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
dev->pm_domain = &acpi_general_pm_domain;
if (power_on) {
acpi_dev_pm_full_power(adev);
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
}
return 0;
}
@@ -1076,7 +1094,7 @@
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
dev->pm_domain = NULL;
- acpi_remove_pm_notifier(adev, acpi_wakeup_device);
+ acpi_remove_pm_notifier(adev);
if (power_off) {
/*
* If the device's PM QoS resume latency limit or flags
@@ -1086,7 +1104,7 @@
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
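
With this rework, callers no longer install their own ACPI notify handlers; they hand acpi_add_pm_notifier() the physical device and a work function, and the shared acpi_pm_notify_handler() takes care of signaling the wakeup event and queuing the work. A minimal sketch of how bus glue might use the new interface (the example driver below is hypothetical; only the acpi_add_pm_notifier()/acpi_remove_pm_notifier() signatures and the wakeup context layout come from the patch above):

#include <linux/acpi.h>
#include <linux/pm_runtime.h>

/* Work function run when the shared notify handler queues wakeup work for
 * this device; the physical device is taken from the wakeup context. */
static void example_wakeup_work(struct work_struct *work)
{
	struct device *dev;

	dev = container_of(work, struct acpi_device_wakeup_context, work)->dev;
	if (dev) {
		pm_wakeup_event(dev, 0);
		pm_runtime_resume(dev);
	}
}

static int example_bind_pm(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return -ENODEV;

	/* Wakeup source, context and notify handler are set up internally. */
	acpi_add_pm_notifier(adev, dev, example_wakeup_work);
	return 0;
}

static void example_unbind_pm(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (adev)
		acpi_remove_pm_notifier(adev);	/* also drops the wakeup source */
}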
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7de5b60..4c5cf77 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -84,8 +84,6 @@
int type, unsigned long long sta);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
-int acpi_bind_one(struct device *dev, struct acpi_device *adev);
-int acpi_unbind_one(struct device *dev);
bool acpi_device_is_present(struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
@@ -108,7 +106,12 @@
int acpi_device_update_power(struct acpi_device *device, int *state_p);
int acpi_wakeup_device_init(void);
+
+#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
void acpi_early_processor_set_pdc(void);
+#else
+static inline void acpi_early_processor_set_pdc(void) {}
+#endif
/* --------------------------------------------------------------------------
Embedded Controller
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index bad25b0..3abe9b2 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -259,12 +259,14 @@
"System description tables not found\n");
return 0;
}
- } else {
+ } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
acpi_physical_address pa = 0;
acpi_find_root_pointer(&pa);
return pa;
}
+
+ return 0;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d388f13..e6ae603 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -593,7 +593,7 @@
if (no_aspm)
pcie_no_aspm();
- pci_acpi_add_bus_pm_notifier(device, root->bus);
+ pci_acpi_add_bus_pm_notifier(device);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 71e2065..00f48d1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -4,17 +4,11 @@
*
* Alex Chiang <achiang@hp.com>
* - Unified x86/ia64 implementations
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
*/
#include <linux/export.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
-#include "internal.h"
-
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
@@ -208,195 +202,3 @@
return acpi_map_cpuid(apic_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-
-static bool __init processor_physically_present(acpi_handle handle)
-{
- int cpuid, type;
- u32 acpi_id;
- acpi_status status;
- acpi_object_type acpi_type;
- unsigned long long tmp;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
- status = acpi_get_type(handle, &acpi_type);
- if (ACPI_FAILURE(status))
- return false;
-
- switch (acpi_type) {
- case ACPI_TYPE_PROCESSOR:
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = object.processor.proc_id;
- break;
- case ACPI_TYPE_DEVICE:
- status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = tmp;
- break;
- default:
- return false;
- }
-
- type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
- cpuid = acpi_get_cpuid(handle, type, acpi_id);
-
- if (cpuid == -1)
- return false;
-
- return true;
-}
-
-static void acpi_set_pdc_bits(u32 *buf)
-{
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
-
- /* Enable coordination with firmware's _TSD info */
- buf[2] = ACPI_PDC_SMP_T_SWCOORD;
-
- /* Twiddle arch-specific bits needed for _PDC */
- arch_acpi_set_pdc_bits(buf);
-}
-
-static struct acpi_object_list *acpi_processor_alloc_pdc(void)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return NULL;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return NULL;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return NULL;
- }
-
- acpi_set_pdc_bits(buf);
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
-
- return obj_list;
-}
-
-/*
- * _PDC is required for a BIOS-OS handshake for most of the newer
- * ACPI processor features.
- */
-static acpi_status
-acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
-{
- acpi_status status = AE_OK;
-
- if (boot_option_idle_override == IDLE_NOMWAIT) {
- /*
- * If mwait is disabled for CPU C-states, the C2C3_FFH access
- * mode will be disabled in the parameter of _PDC object.
- * Of course C1_FFH access mode will also be disabled.
- */
- union acpi_object *obj;
- u32 *buffer = NULL;
-
- obj = pdc_in->pointer;
- buffer = (u32 *)(obj->buffer.pointer);
- buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
-
- }
- status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
-
- if (ACPI_FAILURE(status))
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Could not evaluate _PDC, using legacy perf. control.\n"));
-
- return status;
-}
-
-void acpi_processor_set_pdc(acpi_handle handle)
-{
- struct acpi_object_list *obj_list;
-
- if (arch_has_acpi_pdc() == false)
- return;
-
- obj_list = acpi_processor_alloc_pdc();
- if (!obj_list)
- return;
-
- acpi_processor_eval_pdc(handle, obj_list);
-
- kfree(obj_list->pointer->buffer.pointer);
- kfree(obj_list->pointer);
- kfree(obj_list);
-}
-
-static acpi_status __init
-early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- if (processor_physically_present(handle) == false)
- return AE_OK;
-
- acpi_processor_set_pdc(handle);
- return AE_OK;
-}
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
- pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
- id->ident);
- boot_option_idle_override = IDLE_NOMWAIT;
- return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
- DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
- {},
-};
-
-static void __init processor_dmi_check(void)
-{
- /*
- * Check whether the system is DMI table. If yes, OSPM
- * should not use mwait for CPU-states.
- */
- dmi_check_system(processor_idle_dmi_table);
-}
-#else
-static inline void processor_dmi_check(void) {}
-#endif
-
-void __init acpi_early_processor_set_pdc(void)
-{
- processor_dmi_check();
-
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- early_init_pdc, NULL, NULL, NULL);
- acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
-}
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
new file mode 100644
index 0000000..e5dd808
--- /dev/null
+++ b/drivers/acpi/processor_pdc.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2005 Intel Corporation
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_pdc");
+
+static bool __init processor_physically_present(acpi_handle handle)
+{
+ int cpuid, type;
+ u32 acpi_id;
+ acpi_status status;
+ acpi_object_type acpi_type;
+ unsigned long long tmp;
+ union acpi_object object = { 0 };
+ struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+
+ status = acpi_get_type(handle, &acpi_type);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ switch (acpi_type) {
+ case ACPI_TYPE_PROCESSOR:
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return false;
+ acpi_id = object.processor.proc_id;
+ break;
+ case ACPI_TYPE_DEVICE:
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return false;
+ acpi_id = tmp;
+ break;
+ default:
+ return false;
+ }
+
+ type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+ cpuid = acpi_get_cpuid(handle, type, acpi_id);
+
+ if (cpuid == -1)
+ return false;
+
+ return true;
+}
+
+static void acpi_set_pdc_bits(u32 *buf)
+{
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+
+ /* Enable coordination with firmware's _TSD info */
+ buf[2] = ACPI_PDC_SMP_T_SWCOORD;
+
+ /* Twiddle arch-specific bits needed for _PDC */
+ arch_acpi_set_pdc_bits(buf);
+}
+
+static struct acpi_object_list *acpi_processor_alloc_pdc(void)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list)
+ goto out;
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ kfree(obj_list);
+ goto out;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ kfree(obj);
+ kfree(obj_list);
+ goto out;
+ }
+
+ acpi_set_pdc_bits(buf);
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+
+ return obj_list;
+out:
+ pr_err("Memory allocation error\n");
+ return NULL;
+}
+
+/*
+ * _PDC is required for a BIOS-OS handshake for most of the newer
+ * ACPI processor features.
+ */
+static acpi_status
+acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
+{
+ acpi_status status = AE_OK;
+
+ if (boot_option_idle_override == IDLE_NOMWAIT) {
+ /*
+ * If mwait is disabled for CPU C-states, the C2C3_FFH access
+ * mode will be disabled in the parameter of _PDC object.
+ * Of course C1_FFH access mode will also be disabled.
+ */
+ union acpi_object *obj;
+ u32 *buffer = NULL;
+
+ obj = pdc_in->pointer;
+ buffer = (u32 *)(obj->buffer.pointer);
+ buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
+
+ }
+ status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
+
+ if (ACPI_FAILURE(status))
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Could not evaluate _PDC, using legacy perf. control.\n"));
+
+ return status;
+}
+
+void acpi_processor_set_pdc(acpi_handle handle)
+{
+ struct acpi_object_list *obj_list;
+
+ if (arch_has_acpi_pdc() == false)
+ return;
+
+ obj_list = acpi_processor_alloc_pdc();
+ if (!obj_list)
+ return;
+
+ acpi_processor_eval_pdc(handle, obj_list);
+
+ kfree(obj_list->pointer->buffer.pointer);
+ kfree(obj_list->pointer);
+ kfree(obj_list);
+}
+
+static acpi_status __init
+early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ if (processor_physically_present(handle) == false)
+ return AE_OK;
+
+ acpi_processor_set_pdc(handle);
+ return AE_OK;
+}
+
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+ pr_notice("%s detected - disabling mwait for CPU C-states\n",
+ id->ident);
+ boot_option_idle_override = IDLE_NOMWAIT;
+ return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+ {
+ set_no_mwait, "Extensa 5220", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+ {},
+};
+
+static void __init processor_dmi_check(void)
+{
+ /*
+ * Check whether the system matches an entry in the DMI table. If so,
+ * OSPM should not use mwait for CPU C-states.
+ */
+ dmi_check_system(processor_idle_dmi_table);
+}
+
+void __init acpi_early_processor_set_pdc(void)
+{
+ processor_dmi_check();
+
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ early_init_pdc, NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
+}
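
The _PDC argument built by acpi_processor_alloc_pdc() above is simply an ACPI buffer wrapping three DWORDs: the _PDC revision, the number of capability DWORDs that follow (one here), and the capability bits themselves, which acpi_processor_eval_pdc() may mask before evaluation. A standalone sketch of that 12-byte layout (the numeric values are placeholders, not the real ACPI_PDC_* definitions):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Placeholder values; the kernel fills these from ACPI_PDC_REVISION_ID,
	 * a count of 1, and ACPI_PDC_* capability bits plus arch-specific bits. */
	uint32_t pdc_words[3] = {
		0x00000001,	/* word 0: _PDC revision                   */
		0x00000001,	/* word 1: number of capability DWORDs     */
		0x00000000,	/* word 2: capability bits                 */
	};
	uint8_t buffer[12];	/* becomes the ACPI_TYPE_BUFFER of length 12 */
	size_t i;

	memcpy(buffer, pdc_words, sizeof(buffer));
	for (i = 0; i < sizeof(buffer); i++)
		printf("%02x%c", buffer[i], (i % 4 == 3) ? '\n' : ' ');
	return 0;
}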
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f775fa0..5d592e1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -77,7 +77,9 @@
void (*uevent)(struct acpi_device *, u32))
{
acpi_lock_hp_context();
- acpi_set_hp_context(adev, hp, notify, uevent, NULL);
+ hp->notify = notify;
+ hp->uevent = uevent;
+ acpi_set_hp_context(adev, hp);
acpi_unlock_hp_context();
}
EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
@@ -1421,14 +1423,13 @@
wakeup->sleep_state = sleep_state;
}
}
- acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
out:
kfree(buffer.pointer);
return err;
}
-static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+static void acpi_wakeup_gpe_init(struct acpi_device *device)
{
struct acpi_device_id button_device_ids[] = {
{"PNP0C0C", 0},
@@ -1436,29 +1437,33 @@
{"PNP0C0E", 0},
{"", 0},
};
+ struct acpi_device_wakeup *wakeup = &device->wakeup;
acpi_status status;
acpi_event_status event_status;
- device->wakeup.flags.notifier_present = 0;
+ wakeup->flags.notifier_present = 0;
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) {
- device->wakeup.flags.run_wake = 1;
+ wakeup->flags.run_wake = 1;
if (!acpi_match_device_ids(device, &button_device_ids[1])) {
/* Do not use Lid/sleep button for S5 wakeup */
- if (device->wakeup.sleep_state == ACPI_STATE_S5)
- device->wakeup.sleep_state = ACPI_STATE_S4;
+ if (wakeup->sleep_state == ACPI_STATE_S5)
+ wakeup->sleep_state = ACPI_STATE_S4;
}
+ acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
device_set_wakeup_capable(&device->dev, true);
return;
}
- status = acpi_get_gpe_status(device->wakeup.gpe_device,
- device->wakeup.gpe_number,
- &event_status);
- if (status == AE_OK)
- device->wakeup.flags.run_wake =
- !!(event_status & ACPI_EVENT_FLAG_HANDLE);
+ acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
+ wakeup->gpe_number);
+ status = acpi_get_gpe_status(wakeup->gpe_device, wakeup->gpe_number,
+ &event_status);
+ if (ACPI_FAILURE(status))
+ return;
+
+ wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HANDLE);
}
static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
@@ -1478,7 +1483,7 @@
device->wakeup.flags.valid = 1;
device->wakeup.prepare_count = 0;
- acpi_bus_set_run_wake_flags(device);
+ acpi_wakeup_gpe_init(device);
/* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
* The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b3e3cc7..54da4a3 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -322,6 +322,11 @@
static void acpi_sleep_dmi_check(void)
{
+ int year;
+
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
+ acpi_nvs_nosave_s3();
+
dmi_check_system(acpisleep_dmi_table);
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 071c1df..18c0e69 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -68,7 +68,7 @@
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
/*
@@ -204,6 +204,8 @@
struct acpi_video_device_flags flags;
struct acpi_video_device_cap cap;
struct list_head entry;
+ struct delayed_work switch_brightness_work;
+ int switch_brightness_event;
struct acpi_video_bus *video;
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
@@ -230,8 +232,7 @@
unsigned long long *level, bool raw);
static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
-static int acpi_video_switch_brightness(struct acpi_video_device *device,
- int event);
+static void acpi_video_switch_brightness(struct work_struct *work);
static bool acpi_video_use_native_backlight(void)
{
@@ -275,6 +276,7 @@
int request_level = bd->props.brightness + 2;
struct acpi_video_device *vd = bl_get_data(bd);
+ cancel_delayed_work(&vd->switch_brightness_work);
return acpi_video_device_lcd_set_level(vd,
vd->brightness->levels[request_level]);
}
@@ -461,6 +463,14 @@
},
{
.callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
.ident = "ThinkPad T430 and T430s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -469,10 +479,42 @@
},
{
.callback = video_set_use_native_backlight,
- .ident = "ThinkPad X230",
+ .ident = "ThinkPad T430",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
},
},
{
@@ -572,6 +614,30 @@
},
},
{
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ASUS Zenbook Prime UX31A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
+ },
+ },
+ {
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 4340s",
.matches = {
@@ -581,6 +647,14 @@
},
{
.callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 4540s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
.ident = "HP ProBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1180,6 +1254,8 @@
data->device_id = device_id;
data->video = video;
data->dev = device;
+ INIT_DELAYED_WORK(&data->switch_brightness_work,
+ acpi_video_switch_brightness);
attribute = acpi_video_get_device_attr(video, device_id);
@@ -1402,15 +1478,18 @@
}
}
-static int
-acpi_video_switch_brightness(struct acpi_video_device *device, int event)
+static void
+acpi_video_switch_brightness(struct work_struct *work)
{
+ struct acpi_video_device *device = container_of(to_delayed_work(work),
+ struct acpi_video_device, switch_brightness_work);
unsigned long long level_current, level_next;
+ int event = device->switch_brightness_event;
int result = -EINVAL;
/* no warning message if acpi_backlight=vendor or a quirk is used */
if (!acpi_video_verify_backlight_support())
- return 0;
+ return;
if (!device->brightness)
goto out;
@@ -1432,8 +1511,6 @@
out:
if (result)
printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
-
- return result;
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
@@ -1601,6 +1678,16 @@
return;
}
+static void brightness_switch_event(struct acpi_video_device *video_device,
+ u32 event)
+{
+ if (!brightness_switch_enabled)
+ return;
+
+ video_device->switch_brightness_event = event;
+ schedule_delayed_work(&video_device->switch_brightness_work, HZ / 10);
+}
+
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_video_device *video_device = data;
@@ -1618,28 +1705,23 @@
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_DISPLAY_OFF;
break;
default:
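The video.c changes above stop switching brightness directly from the ACPI notify handler: the event is only recorded, a delayed work item performs the switch roughly 100 ms later, and a direct brightness write from userspace cancels any switch still pending. A minimal sketch of that deferral pattern, using hypothetical demo_* names rather than the driver's own:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct demo_backlight {
        struct delayed_work switch_work;  /* set up with INIT_DELAYED_WORK() at probe time */
        int pending_event;
    };

    static void demo_switch_fn(struct work_struct *work)
    {
        struct demo_backlight *bl = container_of(to_delayed_work(work),
                                                 struct demo_backlight, switch_work);

        /* act on bl->pending_event here, outside the notify path */
        (void)bl;
    }

    /* Notify handler: record the event and defer the actual switch. */
    static void demo_queue_switch(struct demo_backlight *bl, int event)
    {
        bl->pending_event = event;
        schedule_delayed_work(&bl->switch_work, HZ / 10);
    }

    /* Userspace set-brightness path: drop any switch the kernel still has queued. */
    static void demo_user_set(struct demo_backlight *bl)
    {
        cancel_delayed_work(&bl->switch_work);
    }

Deferring the switch also keeps the firmware evaluation of the brightness methods out of the notify context, which is the point of moving it into a work item.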
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607..4cd52a4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5..677c0c1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
+ * Some ATA host controllers may implement a queue depth less than
+ * ATA_MAX_QUEUE, so we should not allocate a tag beyond that
+ * hardware limit.
+ *
* LOCKING:
* None.
*/
@@ -4794,14 +4798,15 @@
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
+ unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+ for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
+ host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->ops = ops;
}
@@ -6169,6 +6175,8 @@
{
int i, rc;
+ host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
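The libata-core.c changes above cap the tag search at the host's real queue depth: ata_host_init() defaults n_tags to ATA_MAX_QUEUE - 1, ata_host_register() clamps it to sht->can_queue, and ata_qc_new() walks at most that many slots in round-robin order. The same bounded search as a standalone sketch (MAX_SLOTS, INTERNAL_SLOT and the busy[] array are illustrative stand-ins, not libata symbols):

    #include <stdbool.h>

    #define MAX_SLOTS     32   /* stand-in for ATA_MAX_QUEUE */
    #define INTERNAL_SLOT 31   /* stand-in for ATA_TAG_INTERNAL, reserved */

    /*
     * Scan at most max_queue slots, starting just after the last tag handed
     * out, wrapping at max_queue and skipping the reserved internal slot.
     * Returns the chosen tag, or -1 if every usable slot is busy.
     */
    static int find_free_tag(const bool busy[MAX_SLOTS], unsigned int max_queue,
                             unsigned int last_tag)
    {
        unsigned int i, tag;

        for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
            tag = tag < max_queue ? tag : 0;   /* wrap inside the clamped range */
            if (tag == INTERNAL_SLOT)
                continue;
            if (!busy[tag])
                return tag;
        }
        return -1;
    }

Clamping max_queue to the controller's advertised can_queue keeps tags the hardware cannot track from ever being handed out.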
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4..dad83df 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
- if (err & ATA_UNC)
+ if (err & (ATA_UNC | ATA_AMNF))
qc->err_mask |= AC_ERR_MEDIA;
if (err & ATA_IDNF)
qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@
}
if (cmd->command != ATA_CMD_PACKET &&
- (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
- ATA_ABORTED)))
- ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+ (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+ ATA_IDNF | ATA_ABORTED)))
+ ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
+ res->feature & ATA_AMNF ? "AMNF " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c07..4d37c54 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
struct ata_port *ap;
- unsigned int irq;
+ int irq;
struct resource *mem_res;
void __iomem *ide_base;
int err;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e9227e..eee48c4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -89,8 +89,13 @@
return dev->archdata.irqs[num];
#else
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
- return of_irq_get(dev->dev.of_node, num);
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ int ret;
+
+ ret = of_irq_get(dev->dev.of_node, num);
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@@ -133,8 +138,13 @@
{
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
- return of_irq_get_byname(dev->dev.of_node, name);
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ int ret;
+
+ ret = of_irq_get_byname(dev->dev.of_node, name);
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
return r ? r->start : -ENXIO;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index bf41296..b67d9ae 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -465,6 +465,7 @@
* device_resume_noirq - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
@@ -594,6 +595,7 @@
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
@@ -1004,6 +1006,7 @@
* device_suspend_noirq - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
@@ -1144,6 +1147,7 @@
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
@@ -1298,6 +1302,7 @@
* @dev: Device to suspend.
* @state: PM transition of the system being carried out.
* @cb: Suspend callback to execute.
+ * @info: string description of caller.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
int (*cb)(struct device *dev, pm_message_t state),
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45..3f2e167 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@
struct task_struct *opa;
kref_get(&connection->kref);
+ /* We may just have force_sig()'ed this thread
+ * to get it out of some blocking network function.
+ * Clear signals; otherwise kthread_run(), which internally uses
+ * wait_for_completion_killable(), will mistake our pending signal
+ * for a new fatal signal and fail. */
+ flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72c..36e54be 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- if (reset_capacity) {
+ if (reset_capacity)
set_capacity(zram->disk, 0);
- revalidate_disk(zram->disk);
- }
+
up_write(&zram->init_lock);
+
+ /*
+ * Revalidate disk out of the init_lock to avoid lockdep splat.
+ * It's okay because disk's capacity is protected by init_lock
+ * so that revalidate_disk always sees up-to-date capacity.
+ */
+ if (reset_capacity)
+ revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- revalidate_disk(zram->disk);
up_write(&zram->init_lock);
+
+ /*
+ * Revalidate disk out of the init_lock to avoid lockdep splat.
+ * It's okay because disk's capacity is protected by init_lock
+ * so that revalidate_disk always sees up-to-date capacity.
+ */
+ revalidate_disk(zram->disk);
+
return len;
out_destroy_comp:
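Both zram hunks apply the same fix: all state changes stay under init_lock, the lock is dropped, and only then is revalidate_disk() called, since it takes block-layer locks of its own and calling it under init_lock creates the ordering lockdep complains about. A generic sketch of the pattern, with demo_* names standing in for zram's:

    #include <linux/rwsem.h>
    #include <linux/genhd.h>
    #include <linux/fs.h>

    struct demo_dev {
        struct rw_semaphore init_lock;   /* protects the capacity below */
        struct gendisk *disk;
        sector_t capacity;
    };

    static void demo_resize(struct demo_dev *dev, sector_t new_capacity)
    {
        down_write(&dev->init_lock);
        dev->capacity = new_capacity;
        set_capacity(dev->disk, new_capacity);
        up_write(&dev->init_lock);

        /*
         * Revalidate only after init_lock is released.  The capacity it
         * observes is still the one set above, because every writer of
         * the capacity goes through init_lock.
         */
        revalidate_disk(dev->disk);
    }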
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index f983806..f50dffc 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -90,7 +90,6 @@
{ USB_DEVICE(0x0b05, 0x17d0) },
{ USB_DEVICE(0x0CF3, 0x0036) },
{ USB_DEVICE(0x0CF3, 0x3004) },
- { USB_DEVICE(0x0CF3, 0x3005) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1c80b0..6250fc2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -162,7 +162,6 @@
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 04680ea..fede8ca 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -406,6 +406,7 @@
H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
BT_ERR("Non-link packet received in non-active state");
h5_reset_rx(h5);
+ return 0;
}
h5->rx_func = h5_rx_payload;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 334601c..c4419ea 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -55,16 +55,41 @@
static int data_avail;
static u8 *rng_buffer;
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ int wait);
+
static size_t rng_buffer_size(void)
{
return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
+static void add_early_randomness(struct hwrng *rng)
+{
+ unsigned char bytes[16];
+ int bytes_read;
+
+ /*
+ * Currently only virtio-rng cannot return data during device
+ * probe, and that's handled in virtio-rng.c itself. If there
+ * are more such devices, this call to rng_get_data can be
+ * made conditional here instead of doing it per-device.
+ */
+ bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ if (bytes_read > 0)
+ add_device_randomness(bytes, bytes_read);
+}
+
static inline int hwrng_init(struct hwrng *rng)
{
- if (!rng->init)
- return 0;
- return rng->init(rng);
+ if (rng->init) {
+ int ret;
+
+ ret = rng->init(rng);
+ if (ret)
+ return ret;
+ }
+ add_early_randomness(rng);
+ return 0;
}
static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@
{
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
- unsigned char bytes[16];
- int bytes_read;
if (rng->name == NULL ||
(rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@
INIT_LIST_HEAD(&rng->list);
list_add_tail(&rng->list, &rng_list);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
- if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ if (old_rng && !rng->init) {
+ /*
+ * Use a new device's input to add some randomness to
+ * the system. If this rng device isn't going to be
+ * used right away, its init function hasn't been
+ * called yet; so only use the randomness from devices
+ * that don't need an init callback.
+ */
+ add_early_randomness(rng);
+ }
+
out_unlock:
mutex_unlock(&rng_mutex);
out:
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f3e7150..e9b15bc 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -38,6 +38,8 @@
int index;
};
+static bool probe_done;
+
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+ /*
+ * Don't ask the host for data until we're set up. This call can
+ * happen during hwrng_register(), after commit d9e7972619.
+ */
+ if (unlikely(!probe_done))
+ return 0;
+
if (!vi->busy) {
vi->busy = true;
init_completion(&vi->have_data);
@@ -137,6 +146,7 @@
return err;
}
+ probe_done = true;
return 0;
}
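Taken together, the hw_random changes mean the core may call a driver's read path from inside hwrng_register() to grab an early seed, so virtio-rng guards that path with a probe_done flag until its own probe has finished. A minimal sketch of that "not ready yet" guard, with demo_* names standing in for the driver's:

    #include <stdbool.h>
    #include <stddef.h>

    static bool demo_probe_done;

    /* Read callback: report "no data yet" rather than touch unready state. */
    static int demo_read(void *buf, size_t len)
    {
        if (!demo_probe_done)
            return 0;   /* caller treats 0 bytes as nothing available */

        /* ... hand the buffer to the device here ... */
        (void)buf;
        return (int)len;
    }

    static int demo_probe(void)
    {
        /* set up queues, register with the core (which may call demo_read) ... */
        demo_probe_done = true;
        return 0;
    }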
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a..71529e1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@
} while (unlikely(entropy_count < pool_size-2 && pnfrac));
}
- if (entropy_count < 0) {
+ if (unlikely(entropy_count < 0)) {
pr_warn("random: negative entropy/overflow: pool %s count %d\n",
r->name, entropy_count);
WARN_ON(1);
@@ -981,7 +981,7 @@
int reserved)
{
int entropy_count, orig;
- size_t ibytes;
+ size_t ibytes, nfrac;
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -999,7 +999,17 @@
}
if (ibytes < min)
ibytes = 0;
- if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
+
+ if (unlikely(entropy_count < 0)) {
+ pr_warn("random: negative entropy count: pool %s count %d\n",
+ r->name, entropy_count);
+ WARN_ON(1);
+ entropy_count = 0;
+ }
+ nfrac = ibytes << (ENTROPY_SHIFT + 3);
+ if ((size_t) entropy_count > nfrac)
+ entropy_count -= nfrac;
+ else
entropy_count = 0;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@
"with %d bits of entropy available\n",
current->comm, nonblocking_pool.entropy_total);
+ nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
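The account() change above turns the entropy debit into an explicitly saturating subtraction: the fractional-bit cost of the bytes being handed out is computed first, a negative pool count is warned about and reset, and the count either drops by the cost or clamps to zero. The same arithmetic as a standalone helper (ENTROPY_SHIFT of 3 matches random.c's 1/8-bit accounting units):

    #include <stddef.h>
    #include <stdio.h>

    #define ENTROPY_SHIFT 3   /* pool counts entropy in 1/8-bit units */

    /* Returns the new pool count after handing out 'ibytes' bytes of output. */
    static int debit_entropy(int entropy_count, size_t ibytes)
    {
        size_t nfrac = ibytes << (ENTROPY_SHIFT + 3);   /* bytes -> fractional bits */

        if (entropy_count < 0) {
            fprintf(stderr, "negative entropy count %d, resetting\n", entropy_count);
            entropy_count = 0;
        }
        if ((size_t)entropy_count > nfrac)
            return entropy_count - (int)nfrac;
        return 0;   /* never let the count go negative */
    }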
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac671..7364a53 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
depends on REGULATOR_ANATOP
+ select PM_OPP
help
This adds cpufreq driver support for Freescale i.MX6 series SoCs.
@@ -118,7 +119,7 @@
If in doubt, say Y.
config ARM_KIRKWOOD_CPUFREQ
- def_bool MACH_KIRKWOOD
+ def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae30..86beda9 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -152,11 +152,8 @@
goto out_put_reg;
}
- ret = of_init_opp_table(cpu_dev);
- if (ret) {
- pr_err("failed to init OPP table: %d\n", ret);
- goto out_put_clk;
- }
+ /* OPPs might be populated at runtime, don't check for error here */
+ of_init_opp_table(cpu_dev);
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d2..d9fdedd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1076,10 +1076,20 @@
kfree(policy);
}
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
+ struct device *cpu_dev)
{
+ int ret;
+
if (WARN_ON(cpu == policy->cpu))
- return;
+ return 0;
+
+ /* Move kobject to the new policy->cpu */
+ ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+ return ret;
+ }
down_write(&policy->rwsem);
@@ -1090,6 +1100,8 @@
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
+
+ return 0;
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
@@ -1154,7 +1166,7 @@
* by invoking update_policy_cpu().
*/
if (recover_policy && cpu != policy->cpu)
- update_policy_cpu(policy, cpu);
+ WARN_ON(update_policy_cpu(policy, cpu, dev));
else
policy->cpu = cpu;
@@ -1307,38 +1319,11 @@
return __cpufreq_add_dev(dev, sif);
}
-static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
- unsigned int old_cpu)
-{
- struct device *cpu_dev;
- int ret;
-
- /* first sibling now owns the new sysfs dir */
- cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
-
- sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
- ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
- if (ret) {
- pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-
- down_write(&policy->rwsem);
- cpumask_set_cpu(old_cpu, policy->cpus);
- up_write(&policy->rwsem);
-
- ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
- "cpufreq");
-
- return -EINVAL;
- }
-
- return cpu_dev->id;
-}
-
static int __cpufreq_remove_dev_prepare(struct device *dev,
struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
- int new_cpu, ret;
+ int ret;
unsigned long flags;
struct cpufreq_policy *policy;
@@ -1378,14 +1363,23 @@
if (cpu != policy->cpu) {
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
- if (new_cpu >= 0) {
- update_policy_cpu(policy, new_cpu);
+ /* Nominate new CPU */
+ int new_cpu = cpumask_any_but(policy->cpus, cpu);
+ struct device *cpu_dev = get_cpu_device(new_cpu);
- if (!cpufreq_suspended)
- pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, new_cpu, cpu);
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = update_policy_cpu(policy, new_cpu, cpu_dev);
+ if (ret) {
+ if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+ "cpufreq"))
+ pr_err("%s: Failed to restore kobj link to cpu:%d\n",
+ __func__, cpu_dev->id);
+ return ret;
}
+
+ if (!cpufreq_suspended)
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
cpufreq_driver->stop_cpu(policy);
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 18d4091..ad3f38f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -170,21 +170,24 @@
dbs_freq_increase(policy, policy->max);
} else {
/* Calculate the next frequency proportional to load */
- unsigned int freq_next;
- freq_next = load * policy->cpuinfo.max_freq / 100;
+ unsigned int freq_next, min_f, max_f;
+
+ min_f = policy->cpuinfo.min_freq;
+ max_f = policy->cpuinfo.max_freq;
+ freq_next = min_f + load * (max_f - min_f) / 100;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
if (!od_tuners->powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_L);
+ CPUFREQ_RELATION_C);
return;
}
freq_next = od_ops.powersave_bias_target(policy, freq_next,
CPUFREQ_RELATION_L);
- __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
}
}
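The ondemand change above maps load onto the span between cpuinfo.min_freq and cpuinfo.max_freq instead of scaling max_freq alone, so an idle CPU requests the minimum frequency rather than 0 kHz, and the result is then resolved with the new closest-match relation. The mapping as a standalone helper with a worked example (frequencies in kHz):

    /* Proportional load-to-frequency mapping used by the ondemand governor. */
    static unsigned int load_to_freq(unsigned int min_f, unsigned int max_f,
                                     unsigned int load /* 0..100 */)
    {
        /*
         * Example: min_f = 800000, max_f = 2400000, load = 50
         *   => 800000 + 50 * (2400000 - 800000) / 100 = 1600000 kHz
         */
        return min_f + load * (max_f - min_f) / 100;
    }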
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 1632981..df14766 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -117,7 +117,7 @@
.frequency = 0,
};
struct cpufreq_frequency_table *pos;
- unsigned int freq, i = 0;
+ unsigned int freq, diff, i = 0;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
@@ -127,6 +127,7 @@
suboptimal.frequency = ~0;
break;
case CPUFREQ_RELATION_L:
+ case CPUFREQ_RELATION_C:
optimal.frequency = ~0;
break;
}
@@ -168,6 +169,15 @@
}
}
break;
+ case CPUFREQ_RELATION_C:
+ diff = abs(freq - target_freq);
+ if (diff < optimal.frequency ||
+ (diff == optimal.frequency &&
+ freq > table[optimal.driver_data].frequency)) {
+ optimal.frequency = diff;
+ optimal.driver_data = i;
+ }
+ break;
}
}
if (optimal.driver_data > i) {
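CPUFREQ_RELATION_C, introduced above, selects the table entry closest to the requested frequency and breaks ties toward the higher one. The selection rule on a plain array (an illustrative reduction, not the driver's table walk with its CPUFREQ_ENTRY_INVALID handling):

    /* Returns the index of the entry closest to target; ties prefer the higher frequency. */
    static unsigned int pick_closest(const unsigned int *freqs, unsigned int count,
                                     unsigned int target)
    {
        unsigned int i, best = 0;
        unsigned int best_diff = (unsigned int)-1;

        for (i = 0; i < count; i++) {
            unsigned int diff = freqs[i] > target ? freqs[i] - target
                                                  : target - freqs[i];

            if (diff < best_diff ||
                (diff == best_diff && freqs[i] > freqs[best])) {
                best = i;
                best_diff = diff;
            }
        }
        return best;
    }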
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index af366c2..c2d3076 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -66,10 +66,12 @@
/* scaling up? scale voltage before frequency */
if (new_freq > old_freq) {
- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
- if (ret) {
- dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
- return ret;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+ return ret;
+ }
}
ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
if (ret) {
@@ -121,10 +123,12 @@
dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
ret = 0;
}
- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
- if (ret) {
- dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
- ret = 0;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+ ret = 0;
+ }
}
}
@@ -182,9 +186,9 @@
}
arm_reg = regulator_get(cpu_dev, "arm");
- pu_reg = regulator_get(cpu_dev, "pu");
+ pu_reg = regulator_get_optional(cpu_dev, "pu");
soc_reg = regulator_get(cpu_dev, "soc");
- if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+ if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
goto put_reg;
@@ -268,9 +272,11 @@
ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
if (ret > 0)
transition_latency += ret * 1000;
- ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
- if (ret > 0)
- transition_latency += ret * 1000;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
/*
* OPP is maintained in order of increasing frequency, and
@@ -327,7 +333,8 @@
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
regulator_put(arm_reg);
- regulator_put(pu_reg);
+ if (!IS_ERR(pu_reg))
+ regulator_put(pu_reg);
regulator_put(soc_reg);
clk_put(arm_clk);
clk_put(pll1_sys_clk);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 86631cb..c5eac94 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,7 +37,6 @@
#define BYT_TURBO_RATIOS 0x66c
#define BYT_TURBO_VIDS 0x66d
-
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
@@ -50,7 +49,7 @@
static inline int32_t div_fp(int32_t x, int32_t y)
{
- return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+ return div_s64((int64_t)x << FRAC_BITS, y);
}
struct sample {
@@ -148,7 +147,7 @@
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
- int deadband, int integral) {
+ int deadband, int integral) {
pid->setpoint = setpoint;
pid->deadband = deadband;
pid->integral = int_tofp(integral);
@@ -167,7 +166,6 @@
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
-
pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
@@ -207,16 +205,13 @@
pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
- pid_reset(&cpu->pid,
- pid_params.setpoint,
- 100,
- pid_params.deadband,
- 0);
+ pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
unsigned int cpu;
+
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu])
intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
@@ -230,13 +225,13 @@
intel_pstate_reset_all_pid();
return 0;
}
+
static int pid_param_get(void *data, u64 *val)
{
*val = *(u32 *)data;
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
- pid_param_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
char *name;
@@ -253,9 +248,9 @@
{NULL, NULL}
};
-static struct dentry *debugfs_parent;
-static void intel_pstate_debug_expose_params(void)
+static void __init intel_pstate_debug_expose_params(void)
{
+ struct dentry *debugfs_parent;
int i = 0;
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
@@ -263,8 +258,8 @@
return;
while (pid_files[i].name) {
debugfs_create_file(pid_files[i].name, 0660,
- debugfs_parent, pid_files[i].value,
- &fops_pid_param);
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
i++;
}
}
@@ -280,10 +275,11 @@
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
+
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -296,10 +292,11 @@
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
+
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -307,14 +304,16 @@
limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
+
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -342,17 +341,16 @@
static struct attribute_group intel_pstate_attr_group = {
.attrs = intel_pstate_attributes,
};
-static struct kobject *intel_pstate_kobject;
-static void intel_pstate_sysfs_expose_params(void)
+static void __init intel_pstate_sysfs_expose_params(void)
{
+ struct kobject *intel_pstate_kobject;
int rc;
intel_pstate_kobject = kobject_create_and_add("intel_pstate",
&cpu_subsys.dev_root->kobj);
BUG_ON(!intel_pstate_kobject);
- rc = sysfs_create_group(intel_pstate_kobject,
- &intel_pstate_attr_group);
+ rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
BUG_ON(rc);
}
@@ -360,6 +358,7 @@
static int byt_get_min_pstate(void)
{
u64 value;
+
rdmsrl(BYT_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -367,6 +366,7 @@
static int byt_get_max_pstate(void)
{
u64 value;
+
rdmsrl(BYT_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -374,6 +374,7 @@
static int byt_get_turbo_pstate(void)
{
u64 value;
+
rdmsrl(BYT_TURBO_RATIOS, value);
return value & 0x7F;
}
@@ -407,7 +408,6 @@
{
u64 value;
-
rdmsrl(BYT_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
@@ -420,10 +420,10 @@
cpudata->vid.turbo = value & 0x7f;
}
-
static int core_get_min_pstate(void)
{
u64 value;
+
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
@@ -431,6 +431,7 @@
static int core_get_max_pstate(void)
{
u64 value;
+
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
@@ -439,9 +440,10 @@
{
u64 value;
int nont, ret;
+
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
- ret = ((value) & 255);
+ ret = (value) & 255;
if (ret <= nont)
ret = nont;
return ret;
@@ -493,12 +495,12 @@
},
};
-
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
int max_perf_adj;
int min_perf;
+
if (limits.no_turbo)
max_perf = cpu->pstate.max_pstate;
@@ -507,8 +509,7 @@
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
- *min = clamp_t(int, min_perf,
- cpu->pstate.min_pstate, max_perf);
+ *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -529,21 +530,6 @@
pstate_funcs.set(cpu, pstate);
}
-static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
-{
- int target;
- target = cpu->pstate.current_pstate + steps;
-
- intel_pstate_set_pstate(cpu, target);
-}
-
-static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
-{
- int target;
- target = cpu->pstate.current_pstate - steps;
- intel_pstate_set_pstate(cpu, target);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -559,13 +545,9 @@
{
struct sample *sample = &cpu->sample;
int64_t core_pct;
- int32_t rem;
core_pct = int_tofp(sample->aperf) * int_tofp(100);
- core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);
-
- if ((rem << 1) >= int_tofp(sample->mperf))
- core_pct += 1;
+ core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
sample->freq = fp_toint(
mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
@@ -576,12 +558,12 @@
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long flags;
+ local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
-
- aperf = aperf >> FRAC_BITS;
- mperf = mperf >> FRAC_BITS;
+ local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
cpu->sample.time = ktime_get();
@@ -598,10 +580,9 @@
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
- int sample_time, delay;
+ int delay;
- sample_time = pid_params.sample_rate_ms;
- delay = msecs_to_jiffies(sample_time);
+ delay = msecs_to_jiffies(pid_params.sample_rate_ms);
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
@@ -616,12 +597,12 @@
current_pstate = int_tofp(cpu->pstate.current_pstate);
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
- sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
+ sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
duration_us = (u32) ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
+ cpu->last_sample_time);
if (duration_us > sample_time * 3) {
sample_ratio = div_fp(int_tofp(sample_time),
- int_tofp(duration_us));
+ int_tofp(duration_us));
core_busy = mul_fp(core_busy, sample_ratio);
}
@@ -632,20 +613,15 @@
{
int32_t busy_scaled;
struct _pid *pid;
- signed int ctl = 0;
- int steps;
+ signed int ctl;
pid = &cpu->pid;
busy_scaled = intel_pstate_get_scaled_busy(cpu);
ctl = pid_calc(pid, busy_scaled);
- steps = abs(ctl);
-
- if (ctl < 0)
- intel_pstate_pstate_increase(cpu, steps);
- else
- intel_pstate_pstate_decrease(cpu, steps);
+ /* Negative values of ctl increase the pstate and vice versa */
+ intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}
static void intel_pstate_timer_func(unsigned long __data)
@@ -705,8 +681,7 @@
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
- cpu->timer.data =
- (unsigned long)cpu;
+ cpu->timer.data = (unsigned long)cpu;
cpu->timer.expires = jiffies + HZ/100;
intel_pstate_busy_pid_reset(cpu);
intel_pstate_sample(cpu);
@@ -751,7 +726,7 @@
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
- limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
@@ -763,8 +738,8 @@
{
cpufreq_verify_within_cpu_limits(policy);
- if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
- (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+ if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
+ policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;
return 0;
@@ -797,7 +772,7 @@
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
- cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
limits.turbo_disabled = 1;
limits.no_turbo = 1;
}
@@ -839,8 +814,8 @@
rdmsrl(MSR_IA32_MPERF, mperf);
if (!pstate_funcs.get_max() ||
- !pstate_funcs.get_min() ||
- !pstate_funcs.get_turbo())
+ !pstate_funcs.get_min() ||
+ !pstate_funcs.get_turbo())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
@@ -922,14 +897,14 @@
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;
- if (acpi_disabled
- || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ if (acpi_disabled ||
+ ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
return false;
for (v_info = vendor_info; v_info->valid; v_info++) {
- if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
- && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
- && intel_pstate_no_acpi_pss())
+ if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
+ !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ intel_pstate_no_acpi_pss())
return true;
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index c8012bc..f910272 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -55,6 +55,7 @@
unsigned freq;
unsigned mult;
} usual_frequency_table[] = {
+ { 350000, 35 }, // 100 * 3.5
{ 400000, 40 }, // 100 * 4
{ 450000, 45 }, // 100 * 4.5
{ 475000, 50 }, // 95 * 5
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 5463767..b5befc2 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@
name = "K4S641632D";
if (machine_is_h3100())
name = "KM416S4030CT";
- if (machine_is_jornada720())
+ if (machine_is_jornada720() || machine_is_h3600())
name = "K4S281632B-1H";
if (machine_is_nanoengine())
name = "MT48LC8M16A2TG-75";
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 1b96fb9..32748c3 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -15,12 +15,7 @@
if CPU_IDLE
config CPU_IDLE_MULTIPLE_DRIVERS
- bool "Support multiple cpuidle drivers"
- default n
- help
- Allows the cpuidle framework to use different drivers for each CPU.
- This is useful if you have a system with different CPU latencies and
- states. If unsure say N.
+ bool
config CPU_IDLE_GOV_LADDER
bool "Ladder governor (for periodic timer tick)"
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index b6d69e8..a186dec 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -10,6 +10,7 @@
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
depends on ARCH_VEXPRESS_TC2_PM
+ depends on MCPM
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
help
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index cb70199..ee9df5e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -119,11 +119,13 @@
ktime_t time_start, time_end;
s64 diff;
+ trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
entered_state = target_state->enter(dev, drv, index);
time_end = ktime_get();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
if (!cpuidle_state_is_coupled(dev, drv, entered_state))
local_irq_enable();
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 9634f20..e431d11 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -182,10 +182,6 @@
static int poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- ktime_t t1, t2;
- s64 diff;
-
- t1 = ktime_get();
local_irq_enable();
if (!current_set_polling_and_test()) {
while (!need_resched())
@@ -193,13 +189,6 @@
}
current_clr_polling();
- t2 = ktime_get();
- diff = ktime_to_us(ktime_sub(t2, t1));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
return index;
}
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 9f08e8c..044ee0d 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -144,7 +144,7 @@
ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
- for (i = 0; i < drv->state_count; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
state = &drv->states[i];
lstate = &ldev->states[i];
@@ -156,7 +156,7 @@
if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
- if (i > 0)
+ if (i > CPUIDLE_DRIVER_STATE_START)
lstate->threshold.demotion_time = state->exit_latency;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c4f80c1..c3732fa 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -35,7 +35,6 @@
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
-#define STDDEV_THRESH 400
/*
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index efe2f17..97c5903 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -445,7 +445,7 @@
#define define_one_driver_ro(_name, show) \
static struct cpuidle_driver_attr attr_driver_##_name = \
- __ATTR(_name, 0644, show, NULL)
+ __ATTR(_name, 0444, show, NULL)
struct cpuidle_driver_kobj {
struct cpuidle_driver *drv;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 49e74c1..3dced0a 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -68,7 +68,6 @@
config ARM_EXYNOS4_BUS_DEVFREQ
bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
- select ARCH_HAS_OPP
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_OPP
help
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4199849..145974f 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,4 +1,5 @@
menu "IEEE 1394 (FireWire) support"
+ depends on HAS_DMA
depends on PCI || COMPILE_TEST
# firewire-core does not depend on PCI but is
# not useful without PCI controller driver
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5798541..a66a321 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@
QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
- QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+ QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
- 0},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 1491dd4..65f2f3f 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -262,7 +262,7 @@
};
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
- const struct acpi_generic_data *gdata)
+ const struct acpi_hest_generic_data *gdata)
{
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -298,7 +298,7 @@
}
static void cper_estatus_print_section(
- const char *pfx, const struct acpi_generic_data *gdata, int sec_no)
+ const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
{
uuid_le *sec_type = (uuid_le *)gdata->section_type;
__u16 severity;
@@ -344,9 +344,9 @@
}
void cper_estatus_print(const char *pfx,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_data *gdata;
unsigned int data_len, gedata_len;
int sec_no = 0;
char newpfx[64];
@@ -359,7 +359,7 @@
"and requires no further action");
printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
data_len = estatus->data_length;
- gdata = (struct acpi_generic_data *)(estatus + 1);
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
@@ -371,10 +371,10 @@
}
EXPORT_SYMBOL_GPL(cper_estatus_print);
-int cper_estatus_check_header(const struct acpi_generic_status *estatus)
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
{
if (estatus->data_length &&
- estatus->data_length < sizeof(struct acpi_generic_data))
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
return -EINVAL;
if (estatus->raw_data_length &&
estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
@@ -384,9 +384,9 @@
}
EXPORT_SYMBOL_GPL(cper_estatus_check_header);
-int cper_estatus_check(const struct acpi_generic_status *estatus)
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
{
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_data *gdata;
unsigned int data_len, gedata_len;
int rc;
@@ -394,7 +394,7 @@
if (rc)
return rc;
data_len = estatus->data_length;
- gdata = (struct acpi_generic_data *)(estatus + 1);
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index eff1a2f..dc793466 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -346,6 +346,7 @@
struct param_info {
int verbose;
+ int found;
void *params;
};
@@ -362,16 +363,12 @@
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
- pr_info("Getting parameters from FDT:\n");
-
for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
- if (!prop) {
- pr_err("Can't find %s in device tree!\n",
- dt_params[i].name);
+ if (!prop)
return 0;
- }
dest = info->params + dt_params[i].offset;
+ info->found++;
val = of_read_number(prop, len / sizeof(u32));
@@ -390,10 +387,21 @@
int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
{
struct param_info info;
+ int ret;
+
+ pr_info("Getting EFI parameters from FDT:\n");
info.verbose = verbose;
+ info.found = 0;
info.params = params;
- return of_scan_flat_dt(fdt_find_uefi_params, &info);
+ ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+ if (!info.found)
+ pr_info("UEFI not found.\n");
+ else if (!ret)
+ pr_err("Can't find '%s' in device tree!\n",
+ dt_params[info.found].name);
+
+ return ret;
}
#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 82d7741..507a3df 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -23,16 +23,6 @@
u32 fdt_val32;
u64 fdt_val64;
- /*
- * Copy definition of linux_banner here. Since this code is
- * built as part of the decompressor for ARM v7, pulling
- * in version.c where linux_banner is defined for the
- * kernel brings other kernel dependencies with it.
- */
- const char linux_banner[] =
- "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
- LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
/* Do some checks on provided FDT, if it exists*/
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index fe7c0e2..57adbc9 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -900,8 +900,6 @@
if (spi_present_mask & (1 << addr))
chips++;
}
- if (!chips)
- return -ENODEV;
} else {
type = spi_get_device_id(spi)->driver_data;
pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@
if (!(spi_present_mask & (1 << addr)))
continue;
chips--;
- if (chips < 0) {
- dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
- goto fail;
- }
data->mcp[addr] = &data->chip[chips];
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type, base,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803..b6ae89e 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@
static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f361263..d893e4d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@
return ret;
}
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
- struct i915_vma *vma;
-
- /*
- * Only the global gtt is relevant for gtt memory mappings, so restrict
- * list traversal to objects bound into the global address space. Note
- * that the active list should be empty, but better safe than sorry.
- */
- WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
- list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
- i915_gem_release_mmap(vma->obj);
- list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
- i915_gem_release_mmap(vma->obj);
-}
-
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -1657,6 +1641,15 @@
obj->fault_mappable = false;
}
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ i915_gem_release_mmap(obj);
+}
+
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f99..34894b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
struct i915_render_state {
struct drm_i915_gem_object *obj;
unsigned long ggtt_offset;
- void *batch;
+ u32 *batch;
u32 size;
u32 len;
};
@@ -80,7 +80,7 @@
static void render_state_free(struct i915_render_state *so)
{
- kunmap(so->batch);
+ kunmap(kmap_to_page(so->batch));
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069..c05c84f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
- u32 seqno, ctl;
+ u32 seqno;
ring->hangcheck.deadlock++;
@@ -2857,15 +2857,12 @@
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
- /* cursory check for an unkickable deadlock */
- ctl = I915_READ_CTL(signaller);
- if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
- return -1;
-
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
- if (signaller->hangcheck.deadlock)
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
return -1;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e27e780..f0be855 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11673,6 +11673,9 @@
/* Toshiba CB35 Chromebook (Celeron 2955U) */
{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -11911,6 +11914,7 @@
* ... */
plane = crtc->plane;
crtc->plane = !plane;
+ crtc->primary_enabled = true;
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 075170d..8a1a4fb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -906,8 +906,8 @@
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = min_clock; clock <= max_clock; clock++) {
+ for (clock = min_clock; clock <= max_clock; clock++) {
+ for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2312602..5e5a72f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,6 +111,13 @@
pipe_config->adjusted_mode.flags |= flags;
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
+
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 628cd89..12b02fe 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
- /* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb..6212537 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
NOUVEAU_THERM_THRS_SHUTDOWN);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
/* schedule the next poll in one second */
if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
- ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ ptimer->alarm(ptimer, 1000000000ULL, alarm);
}
void
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85..0bf1e20 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@
pending = xchg(&qdev->ram_header->int_pending, 0);
+ if (!pending)
+ return IRQ_NONE;
+
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c734..30d242b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b29084..7d68203 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
- u8 backlight_level;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@
pdata->encoder = radeon_encoder;
- backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ /* Set a reasonable default here if the level is 0 otherwise
+ * fbdev will attempt to turn the backlight on after console
+ * unblanking and it will try and restore 0 which turns the backlight
+ * off again.
+ */
+ if (bd->props.brightness == 0)
+ bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0b24711..c0ea661 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f7ece0f..15e4f28 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2642,8 +2642,9 @@
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143..23bff59 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d..3c69f58 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc0..60c47f8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@
/* protected by vm mutex */
struct list_head vm_list;
+ struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
@@ -684,10 +685,9 @@
struct work_struct unpin_work;
struct radeon_device *rdev;
int crtc_id;
- struct drm_framebuffer *fb;
+ uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct radeon_bo *new_rbo;
struct radeon_fence *fence;
};
@@ -868,6 +868,9 @@
struct list_head va;
unsigned id;
+ /* BOs freed, but not yet updated in the PT */
+ struct list_head freed;
+
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@@ -876,6 +879,8 @@
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
+ struct radeon_bo_va *ib_bo_va;
+
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -2833,9 +2838,10 @@
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@@ -2848,8 +2854,8 @@
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a1434..ae763f6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
+ struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ r = radeon_vm_clear_freed(rdev, vm);
+ if (r)
+ return r;
+
+ if (vm->ib_bo_va == NULL) {
+ DRM_ERROR("Tmp BO not in VM!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@@ -480,7 +490,13 @@
continue;
bo = p->relocs[i].robj;
- r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fa..697add2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
- if (radeon_vm_size < 4) {
- dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+ if (radeon_vm_size < 1) {
+ dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
- if (radeon_vm_size > 1024*1024) {
- dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+ if (radeon_vm_size > 1024) {
+ dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
- dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+ dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
- radeon_vm_size < (1ull << radeon_vm_block_size)) {
- dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+ (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+ dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
@@ -1238,7 +1238,7 @@
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
- rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+ rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
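A quick unit check of the shift change, using the 4 KB page size stated in the block-size comment above: radeon_vm_size is now given in gigabytes rather than megabytes (see the radeon_drv.c hunk below), so the maximum page frame number is size * 2^30 / 2^12 = size << 18, which replaces the MB-based size << 8 (2^20 / 2^12). The block-size check correspondingly multiplies radeon_vm_size by 1024 to convert back to MB before comparing against 1ull << radeon_vm_block_size.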
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896ed..bf25061 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -366,7 +366,6 @@
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
- radeon_fence_unref(&work->fence);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -386,51 +385,108 @@
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
- struct drm_framebuffer *fb = work->fb;
-
- uint32_t tiling_flags, pitch_pixels;
- uint64_t base;
-
unsigned long flags;
int r;
down_read(&rdev->exclusive_lock);
- while (work->fence) {
+ if (work->fence) {
r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev);
down_read(&rdev->exclusive_lock);
}
+ if (r)
+ DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
- if (r) {
- DRM_ERROR("failed to wait on page flip fence (%d)!\n",
- r);
- goto cleanup;
- } else
- radeon_fence_unref(&work->fence);
+ /* We continue with the page flip even if we failed to wait on
+ * the fence, otherwise the DRM core and userspace will be
+ * confused about which BO the CRTC is scanning out
+ */
+
+ radeon_fence_unref(&work->fence);
}
- /* pin the new buffer */
- DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
- work->old_rbo, work->new_rbo);
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
- r = radeon_bo_reserve(work->new_rbo, false);
+ /* set the proper interrupt */
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ struct radeon_bo *new_rbo;
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
+ unsigned long flags;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&new_rbo->tbo.bdev->fence_lock);
+ if (new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+ spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
+ /* pin the new buffer */
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+ work->old_rbo, new_rbo);
+
+ r = radeon_bo_reserve(new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(new_rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@
}
base &= ~7;
}
+ work->base = base;
r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
if (r) {
@@ -477,88 +534,11 @@
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* set the proper interrupt */
- radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
-
- /* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
-
- radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- up_read(&rdev->exclusive_lock);
-
- return;
-
-pflip_cleanup:
- if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
- DRM_ERROR("failed to reserve new rbo in error path\n");
- goto cleanup;
- }
- if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
- }
- radeon_bo_unreserve(work->new_rbo);
-
-cleanup:
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
- kfree(work);
- up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_flip_work *work;
- unsigned long flags;
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- INIT_WORK(&work->flip_work, radeon_flip_work_func);
- INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- work->fb = fb;
- work->event = event;
-
- /* schedule unpin of the old buffer */
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- obj = old_radeon_fb->obj;
-
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- work->old_rbo = gem_to_radeon_bo(obj);
-
- new_radeon_fb = to_radeon_framebuffer(fb);
- obj = new_radeon_fb->obj;
- work->new_rbo = gem_to_radeon_bo(obj);
-
- spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
- if (work->new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
- spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
- kfree(work);
- return -EBUSY;
+ r = -EBUSY;
+ goto vblank_cleanup;
}
radeon_crtc->flip_status = RADEON_FLIP_PENDING;
radeon_crtc->flip_work = work;
@@ -569,8 +549,27 @@
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
return 0;
+
+vblank_cleanup:
+ drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+
+pflip_cleanup:
+ if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto cleanup;
+ }
+ if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ }
+ radeon_bo_unreserve(new_rbo);
+
+cleanup:
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ radeon_fence_unref(&work->fence);
+ kfree(work);
+
+ return r;
}
static int
@@ -830,6 +829,10 @@
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* don't leak the edid if we already fetched it in detect() */
+ if (radeon_connector->edid)
+ goto got_edid;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
+got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb14213..e9e3610 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
int radeon_deep_color = 0;
@@ -243,7 +243,7 @@
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d9318..d25ae6a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@
return -ENOMEM;
}
- r = radeon_vm_init(rdev, &fpriv->vm);
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@@ -596,22 +597,23 @@
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+ RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@@ -640,21 +642,19 @@
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
+ if (vm->ib_bo_va)
+ radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6b..725d366 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
+ INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@
head = &tmp->vm_list;
}
+ if (bo_va->soffset) {
+ /* add a clone of the bo_va to clear the old address */
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
+ tmp->soffset = bo_va->soffset;
+ tmp->eoffset = bo_va->eoffset;
+ tmp->vm = vm;
+ list_add(&tmp->vm_status, &vm->freed);
+ }
+
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
@@ -823,25 +837,19 @@
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
+ struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- struct radeon_bo_va *bo_va;
unsigned nptes, ndw;
uint64_t addr;
int r;
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
- bo, vm);
+ bo_va->bo, vm);
return -EINVAL;
}
@@ -868,7 +876,7 @@
trace_radeon_vm_bo_update(bo_va);
- nptes = radeon_bo_ngpu_pages(bo);
+ nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */
ndw = 64;
@@ -911,33 +919,61 @@
}
/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ list_del(&bo_va->vm_status);
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ kfree(bo_va);
+ if (r)
+ return r;
+ }
+ return 0;
+
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo to a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
*
* Object have to be reserved!
*/
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
{
- int r = 0;
+ struct radeon_vm *vm = bo_va->vm;
- mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset)
- r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
-
- list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
- kfree(bo_va);
- return r;
+ mutex_lock(&vm->mutex);
+ list_del(&bo_va->vm_list);
+
+ if (bo_va->soffset) {
+ bo_va->bo = NULL;
+ list_add(&bo_va->vm_status, &vm->freed);
+ } else {
+ kfree(bo_va);
+ }
+
+ mutex_unlock(&vm->mutex);
}
/**
@@ -975,11 +1011,13 @@
int r;
vm->id = 0;
+ vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
+ INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@
kfree(bo_va);
}
}
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29..3e21e86 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eba0225..9e854fd 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff..32e50be 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- /* There are stability issues reported on latops with
- * bapm installed when switching between AC and battery
- * power. At the same time, some desktop boards hang
- * if it's not enabled and dpm is enabled.
+ /* There are stability issues reported with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
*/
- if (rdev->flags & RADEON_IS_MOBILITY)
- pi->enable_bapm = false;
- else
+ if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index eaaa3d8..23b2ce2 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -246,8 +246,8 @@
/*
* Send the information to the user-level daemon.
*/
- fcopy_send_data();
schedule_delayed_work(&fcopy_work, 5*HZ);
+ fcopy_send_data();
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f4dea5..9ee3913 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -515,7 +515,7 @@
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 255);
+ temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 255);
+ temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 255);
+ temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index afd3104..d14ab3c 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -194,7 +194,7 @@
struct device_attribute *devattr,
char *buf)
{
- return sprintf(buf, "da9052-hwmon\n");
+ return sprintf(buf, "da9052\n");
}
static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 73b3865..35eb773 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -204,7 +204,7 @@
struct device_attribute *devattr,
char *buf)
{
- return sprintf(buf, "da9055-hwmon\n");
+ return sprintf(buf, "da9055\n");
}
static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c5..34b9a60 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@
*/
static inline s8 TEMP_TO_REG(int val)
{
- return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+ return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@
err = kstrtoul(buf, 10, &val);
if (err)
return err;
+ if (val > 255)
+ return -EINVAL;
data->vrm = val;
return count;
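Both smsc47m192 hunks tighten input validation. TEMP_TO_REG() returns an s8, so the clamp has to happen on the millidegree input before scaling: assuming SCALE(x, 1, 1000) is essentially a rounded divide by 1000, the old expression turned e.g. 300000 into 300, the clamp against -128000..127000 never fired, and the value wrapped when truncated to s8; clamping the input to -128000..127000 first keeps the result within -128..127. The added vrm check rejects values above 255 before they are stored.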
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aa..a04c49f 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@
config BLK_DEV_CS5530
tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@
config BLK_DEV_CS5535
tristate "AMD CS5535 chipset support"
- depends on X86 && !X86_64
+ depends on X86_32
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@
config BLK_DEV_SC1200
tristate "National SCx200 chipset support"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a9..a3d3b17 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
- goto out_up;
+ if (!host->get_lock)
+ if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+ goto out_up;
#if !defined(__mc68000__)
printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@
ide_proc_unregister_port(hwif);
- free_irq(hwif->irq, hwif);
+ if (!hwif->host->get_lock)
+ free_irq(hwif->irq, hwif);
device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 17aeea1..2a5fa9a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,8 +111,14 @@
{6, 250000}, {1, 560000}
};
+/*
+ * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
+ * The userspace interface uses m/s^2 and we declare micro units
+ * So scale factor is given by:
+ * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ */
static const int mma8452_scales[3][2] = {
- {0, 977}, {0, 1953}, {0, 3906}
+ {0, 9577}, {0, 19154}, {0, 38307}
};
static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
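The replacement scale table follows directly from the formula in the new comment. As a sanity check, a tiny user-space program (illustration only, not part of the driver) reproduces the three micro-unit values:

#include <stdio.h>

int main(void)
{
	const double g = 9.80665;	/* m/s^2, as in the comment above */
	int n;

	/* scale = g * N * 1000000 / 2048 for N = 2, 4, 8 */
	for (n = 2; n <= 8; n *= 2)
		printf("+/-%dG: %.0f\n", n, g * n * 1000000.0 / 2048.0);
	return 0;	/* prints 9577, 19154 and 38307 */
}

The old entries (977, 1953, 3906) were just N * 1000000 / 2048, i.e. they reported the scale in units of g instead of m/s^2.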
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 258a973..bfbf4d4 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -345,6 +345,9 @@
&indio_dev->event_interface->dev_attr_list);
kfree(postfix);
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+ continue;
+
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5e153f6..768a0fb 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -432,8 +432,17 @@
*/
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
+ struct c4iw_ep *ep = handle;
+
printk(KERN_ERR MOD "ARP failure duing connect\n");
kfree_skb(skb);
+ connect_reply_upcall(ep, -EHOSTUNREACH);
+ state_set(&ep->com, DEAD);
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
}
/*
@@ -658,7 +667,7 @@
opt2 |= T5_OPT_2_VALID;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
}
- t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+ t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@
PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
release_tid(&dev->rdev, hwtid, skb);
return;
}
@@ -3917,7 +3925,7 @@
return 0;
}
-void __exit c4iw_cm_term(void)
+void c4iw_cm_term(void)
{
WARN_ON(!list_empty(&timeout_list));
flush_workqueue(workq);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index dd93aad..7db82b2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -696,6 +696,7 @@
pr_err(MOD "error allocating status page\n");
goto err4;
}
+ rdev->status_page->db_off = 0;
return 0;
err4:
c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@
if (ctx->dev->rdev.oc_mw_kva)
iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev);
- iwpm_exit(RDMA_NL_C4IW);
ctx->dev = NULL;
}
@@ -826,12 +826,6 @@
setup_debugfs(devp);
}
- ret = iwpm_init(RDMA_NL_C4IW);
- if (ret) {
- pr_err("port mapper initialization failed with %d\n", ret);
- ib_dealloc_device(&devp->ibdev);
- return ERR_PTR(ret);
- }
return devp;
}
@@ -1332,6 +1326,15 @@
pr_err("%s[%u]: Failed to add netlink callback\n"
, __func__, __LINE__);
+ err = iwpm_init(RDMA_NL_C4IW);
+ if (err) {
+ pr_err("port mapper initialization failed with %d\n", err);
+ ibnl_remove_client(RDMA_NL_C4IW);
+ c4iw_cm_term();
+ debugfs_remove_recursive(c4iw_debugfs_root);
+ return err;
+ }
+
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1349,6 +1352,7 @@
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+ iwpm_exit(RDMA_NL_C4IW);
ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
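The iwpm_init()/iwpm_exit() pair moves out of the per-device alloc/remove paths and into the module init/exit hunks, so the RDMA_NL_C4IW port-mapper client is set up once per module load instead of once per adapter, and the init error path now unwinds the netlink client registration as well. Setting rdev->status_page->db_off = 0 looks like defensive initialization of the freshly allocated status page.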
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d..361fff7 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -908,7 +908,7 @@
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
-void __exit c4iw_cm_term(void);
+void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d13ddf1..bbbcf38 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
+ if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db..29ca0bb 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@
}
static int input_get_disposition(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+ unsigned int type, unsigned int code, int *pval)
{
int disposition = INPUT_IGNORE_EVENT;
+ int value = *pval;
switch (type) {
@@ -357,6 +358,7 @@
break;
}
+ *pval = value;
return disposition;
}
@@ -365,7 +367,7 @@
{
int disposition;
- disposition = input_get_disposition(dev, type, code, value);
+ disposition = input_get_disposition(dev, type, code, &value);
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
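Passing value by pointer lets input_get_disposition() hand back any adjustment it makes while classifying the event: the switch body (mostly elided from this hunk) can rewrite its local value, and previously that change was lost because input_handle_event() kept forwarding its own copy to dev->event() and the handlers. Writing *pval back just before returning keeps both sides consistent.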
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b487..de7be4f 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int keyscan_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@
mutex_unlock(&input->mutex);
return retval;
}
+#endif
static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9..fed5102 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@
module_platform_driver(sirfsoc_pwrc_driver);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d9..ef9e0b8 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+ "LEN2004", NULL},
1024, 5112, 2024, 4832
},
{
@@ -168,7 +169,7 @@
"LEN0049",
"LEN2000",
"LEN2001", /* Edge E431 */
- "LEN2002",
+ "LEN2002", /* Edge E531 */
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d..136b7b20 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@
},
},
{
+ /* Acer Aspire 5710 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05c..e73cf2c 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@
* a=(pi*r^2)/C.
*/
int a = data[5];
- int x_res = input_abs_get_res(input, ABS_X);
- int y_res = input_abs_get_res(input, ABS_Y);
- width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+ int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+ int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
height = width * y_res / x_res;
}
@@ -1587,7 +1587,7 @@
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
} else {
- if (features->touch_max <= 2) {
+ if (features->touch_max == 1) {
input_set_abs_params(input_dev, ABS_X, 0,
features->x_max, features->x_fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
- if (features->device_type == BTN_TOOL_FINGER) {
- unsigned int flags = INPUT_MT_DIRECT;
-
- if (wacom_wac->features.type == TABLETPC2FG)
- flags = 0;
-
- input_mt_init_slots(input_dev, features->touch_max, flags);
- }
+ if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
/* fall through */
case TABLETPC:
@@ -1883,10 +1877,6 @@
__set_bit(BTN_RIGHT, input_dev->keybit);
if (features->touch_max) {
- /* touch interface */
- unsigned int flags = INPUT_MT_POINTER;
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
- } else {
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- flags = 0;
}
- input_mt_init_slots(input_dev, features->touch_max, flags);
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
} else {
/* buttons/keys only interface */
__clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a1..2ce6495 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@
*/
err = of_property_read_u32(node, "ti,coordinate-readouts",
&ts_dev->coordinate_readouts);
- if (err < 0)
+ if (err < 0) {
+ dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
err = of_property_read_u32(node, "ti,coordiante-readouts",
&ts_dev->coordinate_readouts);
+ }
+
if (err < 0)
return err;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b99dd88..bb446d7 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -170,10 +170,10 @@
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
- BUG_ON(!is_power_of_2(addrspace_size));
+ BUG_ON((addrspace_size & (addrspace_size - 1)));
/* window size is 2^(WSE+1) bytes */
- return __ffs(addrspace_size) - 1;
+ return fls64(addrspace_size) - 2;
}
/* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@
struct paace *ppaace;
unsigned long fspi;
- if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+ if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
pr_debug("window size too small or not a power of two %llx\n", win_size);
return -EINVAL;
}
@@ -464,7 +464,7 @@
return -ENOENT;
}
- if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+ if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
pr_debug("subwindow size out of range, or not a power of 2\n");
return -EINVAL;
}
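Two patterns recur in the fsl_pamu.c hunks. The open-coded (x & (x - 1)) test replaces is_power_of_2(), presumably because these sizes are 64-bit (phys_addr_t) while is_power_of_2() takes an unsigned long, which truncates on 32-bit builds; note that the open-coded form, unlike the helper, accepts 0, which the win_size paths still reject through the < PAMU_PAGE_SIZE comparison. For the WSE encoding, the comment says the window size is 2^(WSE+1) bytes, so WSE = log2(size) - 1; for a power of two fls64(size) equals log2(size) + 1, hence fls64(size) - 2, e.g. a 4 KB window gives fls64 = 13 and WSE = 11.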
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 93072ba..af47648 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -301,7 +301,7 @@
* Size must be a power of two and at least be equal
* to PAMU page size.
*/
- if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+ if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
pr_debug("%s: size too small or not a power of two\n", __func__);
return -EINVAL;
}
@@ -335,11 +335,6 @@
return domain;
}
-static inline struct device_domain_info *find_domain(struct device *dev)
-{
- return dev->archdata.iommu_domain;
-}
-
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
unsigned long flags;
@@ -380,7 +375,7 @@
* Check here if the device is already attached to domain or not.
* If the device is already attached to a domain detach it.
*/
- old_domain_info = find_domain(dev);
+ old_domain_info = dev->archdata.iommu_domain;
if (old_domain_info && old_domain_info->domain != dma_domain) {
spin_unlock_irqrestore(&device_domain_lock, flags);
detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@
* the info for the first LIODN as all
* LIODNs share the same domain
*/
- if (!old_domain_info)
+ if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1042,12 +1037,15 @@
group = get_shared_pci_device_group(pdev);
}
+ if (!group)
+ group = ERR_PTR(-ENODEV);
+
return group;
}
static int fsl_pamu_add_device(struct device *dev)
{
- struct iommu_group *group = NULL;
+ struct iommu_group *group = ERR_PTR(-ENODEV);
struct pci_dev *pdev;
const u32 *prop;
int ret, len;
@@ -1070,7 +1068,7 @@
group = get_device_iommu_group(dev);
}
- if (!group || IS_ERR(group))
+ if (IS_ERR(group))
return PTR_ERR(group);
ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d..7c131cf 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -42,6 +42,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
@@ -954,7 +955,9 @@
}
for_each_possible_cpu(cpu) {
- unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ unsigned long offset = percpu_offset * core_id;
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
}
@@ -1071,8 +1074,10 @@
gic_cnt++;
return 0;
}
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
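The core-ID fix above matters for banked GIC setups described with percpu-offset in DT: cpu_logical_map() returns the full MPIDR, which on multi-cluster systems carries cluster bits in the upper affinity fields (e.g. 0x100 for the first CPU of the second cluster, assuming the usual layout with Aff0 in bits [7:0]), so multiplying percpu_offset by the raw value produced bogus per-CPU base addresses. Using MPIDR_AFFINITY_LEVEL(mpidr, 0) scales the offset by the core ID within the cluster instead. The new IRQCHIP_DECLARE lines simply let the arm,gic-400 and arm,cortex-a7-gic compatibles probe the same driver.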
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 0df6691..8dc791b 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -2059,13 +2059,17 @@
memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
- if (ic->parm.ni1_io.timeout > 0)
- if (!(pc = ni1_new_l3_process(st, -1)))
- { free_invoke_id(st, id);
+ if (ic->parm.ni1_io.timeout > 0) {
+ pc = ni1_new_l3_process(st, -1);
+ if (!pc) {
+ free_invoke_id(st, id);
return (-2);
}
- pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */
- pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */
+ /* remember id */
+ pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
+ /* and procedure */
+ pc->prot.ni1.proc = ic->parm.ni1_io.proc;
+ }
if (!(skb = l3_alloc_skb(l)))
{ free_invoke_id(st, id);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61ac632..62f0688 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -442,7 +442,7 @@
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
- int len, err;
+ int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@@ -458,12 +458,6 @@
if (IS_ERR(code))
return PTR_ERR(code);
- err = sk_chk_filter(code, uprog.len);
- if (err) {
- kfree(code);
- return err;
- }
-
*p = code;
return uprog.len;
}
@@ -644,9 +638,15 @@
fprog.len = len;
fprog.filter = code;
- if (is->pass_filter)
+ if (is->pass_filter) {
sk_unattached_filter_destroy(is->pass_filter);
- err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+ is->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&is->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
return err;
@@ -663,9 +663,15 @@
fprog.len = len;
fprog.filter = code;
- if (is->active_filter)
+ if (is->active_filter) {
sk_unattached_filter_destroy(is->active_filter);
- err = sk_unattached_filter_create(&is->active_filter, &fprog);
+ is->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&is->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
return err;
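Both ppp filter hunks follow the same pattern: destroy the old unattached filter, reset the pointer to NULL so a failed or skipped re-create cannot leave a dangling reference, and only call sk_unattached_filter_create() when a non-empty program was supplied (fprog.filter != NULL), treating an empty program as plain filter removal with err = 0. The separate sk_chk_filter() call in get_filter() is dropped because, as far as I can tell, sk_unattached_filter_create() already validates the program itself.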
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4ead4ba..d2899e7 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -425,6 +425,15 @@
disk_super = dm_block_data(sblock);
+ /* Verify the data block size hasn't changed */
+ if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
+ DMERR("changing the data block size (from %u to %llu) is not supported",
+ le32_to_cpu(disk_super->data_block_size),
+ (unsigned long long)cmd->data_block_size);
+ r = -EINVAL;
+ goto bad;
+ }
+
r = __check_incompat_features(disk_super, cmd);
if (r < 0)
goto bad;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b086a94..e9d33ad 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -613,6 +613,15 @@
disk_super = dm_block_data(sblock);
+ /* Verify the data block size hasn't changed */
+ if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
+ DMERR("changing the data block size (from %u to %llu) is not supported",
+ le32_to_cpu(disk_super->data_block_size),
+ (unsigned long long)pmd->data_block_size);
+ r = -EINVAL;
+ goto bad_unlock_sblock;
+ }
+
r = __check_incompat_features(disk_super, pmd);
if (r < 0)
goto bad_unlock_sblock;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2e..2e3cdcf 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
- if (!(cmd->args[0] >> 7) & 0x01) {
+ if (!((cmd->args[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
}
@@ -485,20 +485,6 @@
if (ret)
goto err;
- cmd.args[0] = 0x05;
- cmd.args[1] = 0x00;
- cmd.args[2] = 0xaa;
- cmd.args[3] = 0x4d;
- cmd.args[4] = 0x56;
- cmd.args[5] = 0x40;
- cmd.args[6] = 0x00;
- cmd.args[7] = 0x00;
- cmd.wlen = 8;
- cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret)
- goto err;
-
/* cold state - try to download firmware */
dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
KBUILD_MODNAME, si2168_ops.info.name);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e8..53f7f06 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
#include <linux/firmware.h>
#include <linux/i2c-mux.h>
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
/* state struct */
struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00..9619be5 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
+ fe_modulation_t modulation;
dev_dbg(&priv->i2c->dev,
"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@
switch (c->delivery_system) {
case SYS_DVBS:
+ modulation = QPSK;
rolloff = 0;
pilot = 2;
break;
case SYS_DVBS2:
+ modulation = c->modulation;
+
switch (c->rolloff) {
case ROLLOFF_20:
rolloff = 2;
@@ -750,7 +754,7 @@
for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
- c->modulation == TDA10071_MODCOD[i].modulation &&
+ modulation == TDA10071_MODCOD[i].modulation &&
c->fec_inner == TDA10071_MODCOD[i].fec) {
mode = TDA10071_MODCOD[i].val;
dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@
switch ((buf[1] >> 0) & 0x01) {
case 0:
- c->inversion = INVERSION_OFF;
+ c->inversion = INVERSION_ON;
break;
case 1:
- c->inversion = INVERSION_ON;
+ c->inversion = INVERSION_OFF;
break;
}
@@ -856,7 +860,7 @@
if (ret)
goto error;
- c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+ c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
return ret;
error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14b..4204861 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@
{ SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
{ SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
/* 8PSK */
+ { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
{ SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
{ SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
{ SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760..0006d6b 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed164..1e4ec69 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return ret;
}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b..b431b58 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return ret;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752..fa4cc7b 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
- if (!(buf[0] >> 7) & 0x01) {
+ if (!((buf[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
} else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d3..7b9b75f 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@
if (ret < 0)
goto err;
- if (tmp == 0x00)
- dev_dbg(&d->udev->dev,
- "%s: [%d]tuner not set, using default\n",
- __func__, i);
- else
- state->af9033_config[i].tuner = tmp;
-
dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
- __func__, i, state->af9033_config[i].tuner);
+ __func__, i, tmp);
+
+ /* tuner sanity check */
+ if (state->chip_type == 0x9135) {
+ if (state->chip_version == 0x02) {
+ /* IT9135 BX (v2) */
+ switch (tmp) {
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ state->af9033_config[i].tuner = tmp;
+ break;
+ }
+ } else {
+ /* IT9135 AX (v1) */
+ switch (tmp) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ state->af9033_config[i].tuner = tmp;
+ break;
+ }
+ }
+ } else {
+ /* AF9035 */
+ state->af9033_config[i].tuner = tmp;
+ }
+
+ if (state->af9033_config[i].tuner != tmp) {
+ dev_info(&d->udev->dev,
+ "%s: [%d] overriding tuner from %02x to %02x\n",
+ KBUILD_MODNAME, i, tmp,
+ state->af9033_config[i].tuner);
+ }
switch (state->af9033_config[i].tuner) {
case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e..339adce 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@
{USB_DEVICE(0x093a, 0x2620)},
{USB_DEVICE(0x093a, 0x2621)},
{USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2625)},
{USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c417..6bce01a 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@
}
/*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
/* function expects dev->io_mutex to be hold by caller */
int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@
case V4L2_CID_MPEG_AUDIO_ENCODING:
if (dev->flags & HDPVR_FLAG_AC3_CAP) {
opt->audio_codec = ctrl->val;
- return hdpvr_set_audio(dev, opt->audio_input,
+ return hdpvr_set_audio(dev, opt->audio_input + 1,
opt->audio_codec);
}
return 0;
@@ -1198,7 +1198,7 @@
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
V4L2_CID_MPEG_AUDIO_ENCODING,
ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
- 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+ 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
V4L2_CID_MPEG_VIDEO_ENCODING,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54ca..ce1c9f5 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@
aspect.denominator = 9;
} else if (ratio == 34) {
aspect.numerator = 4;
- aspect.numerator = 3;
+ aspect.denominator = 3;
} else if (ratio == 68) {
aspect.numerator = 15;
- aspect.numerator = 9;
+ aspect.denominator = 9;
} else {
aspect.numerator = hor_landscape + 99;
aspect.denominator = 100;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e4ec355..a7543ba 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -52,6 +52,11 @@
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90 0x00b0
+#define LH28F640BFHE_PBTL90 0x00b1
+#define LH28F640BFHE_PTTL70A 0x00b2
+#define LH28F640BFHE_PBTL70A 0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+ /* Sharp LH28F640BF Family */
+ if (cfi->mfr == CFI_MFR_SHARP && (
+ cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+ cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+ return 1;
+ return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /* Reset the Partition Configuration Register on LH28F640BF
+ * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+ if (is_LH28F640BF(cfi)) {
+ printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+ map_write(map, CMD(0x60), 0);
+ map_write(map, CMD(0x04), 0);
+
+ /* We have set one single partition thus
+ * Simultaneous Operations are not allowed */
+ printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+ extp->FeatureSupport &= ~512;
+ }
+}
+
static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
{ 0, 0, NULL }
};
@@ -1649,6 +1686,12 @@
initial_adr = adr;
cmd_adr = adr & ~(wbufsize-1);
+ /* Sharp LH28F640BF chips need the first address for the
+ * Page Buffer Program command. See Table 5 of
+ * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+ if (is_LH28F640BF(cfi))
+ cmd_adr = adr;
+
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 7df8694..b4f61c7 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -475,6 +475,7 @@
ELM_SYNDROME_FRAGMENT_1 + offset);
regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_0 + offset);
+ break;
default:
return -EINVAL;
}
@@ -520,6 +521,7 @@
regs->elm_syndrome_fragment_1[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
regs->elm_syndrome_fragment_0[i]);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41167e9..4f3e80c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4047,8 +4047,10 @@
ecc->layout->oobavail += ecc->layout->oobfree[i].length;
mtd->oobavail = ecc->layout->oobavail;
- /* ECC sanity check: warn noisily if it's too weak */
- WARN_ON(!nand_ecc_strength_good(mtd));
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(mtd))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ mtd->name);
/*
* Set the number of read / write steps for one page depending on ECC
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b04e7d0..0431b46 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,7 +125,7 @@
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
- if (vol_id < av->vol_id)
+ if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -423,7 +423,7 @@
pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
- } else if (ret == UBI_IO_BITFLIPS)
+ } else if (err == UBI_IO_BITFLIPS)
scrub = 1;
/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3a451b6..701f86c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4068,7 +4068,7 @@
}
if (ad_select) {
- bond_opt_initstr(&newval, lacp_rate);
+ bond_opt_initstr(&newval, ad_select);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
&newval);
if (!valptr) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 141160e..5776e50 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,13 +654,13 @@
work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
- if (work_done < budget) {
+ if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index));
}
- return work_done;
+ return 0;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@
usleep_range(1000, 2000);
}
-static inline int umac_reset(struct bcm_sysport_priv *priv)
+static inline void umac_reset(struct bcm_sysport_priv *priv)
{
- unsigned int timeout = 0;
u32 reg;
- int ret = 0;
- umac_writel(priv, 0, UMAC_CMD);
- while (timeout++ < 1000) {
- reg = umac_readl(priv, UMAC_CMD);
- if (!(reg & CMD_SW_RESET))
- break;
-
- udelay(1);
- }
-
- if (timeout == 1000) {
- dev_err(&priv->pdev->dev,
- "timeout waiting for MAC to come out of reset\n");
- ret = -ETIMEDOUT;
- }
-
- return ret;
+ reg = umac_readl(priv, UMAC_CMD);
+ reg |= CMD_SW_RESET;
+ umac_writel(priv, reg, UMAC_CMD);
+ udelay(10);
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_SW_RESET;
+ umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@
int ret;
/* Reset UniMAC */
- ret = umac_reset(priv);
- if (ret) {
- netdev_err(dev, "UniMAC reset failed\n");
- return ret;
- }
+ umac_reset(priv);
/* Flush TX and RX FIFOs at TOPCTRL level */
topctrl_flush(priv);
@@ -1589,12 +1574,6 @@
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
- /* We are interfaced to a switch which handles the multicast
- * filtering for us, so we do not support programming any
- * multicast hash table in this Ethernet MAC.
- */
- dev->flags &= ~IFF_MULTICAST;
-
/* libphy will adjust the link state accordingly */
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 47c5814..4b875da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -797,7 +797,8 @@
return;
}
- bnx2x_frag_free(fp, new_data);
+ if (new_data)
+ bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2887034..6a8b145 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12937,7 +12937,7 @@
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfb..16281ad 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1408,13 +1408,6 @@
if (cb->skb)
continue;
- /* set the DMA descriptor length once and for all
- * it will only change if we support dynamically sizing
- * priv->rx_buf_len, but we do not
- */
- dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
- priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
-
ret = bcmgenet_rx_refill(priv, cb);
if (ret)
break;
@@ -2535,14 +2528,17 @@
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
- err = register_netdev(dev);
- if (err)
- goto err_clk_disable;
+ /* libphy will determine the link state */
+ netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
+ err = register_netdev(dev);
+ if (err)
+ goto err;
+
return err;
err_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f11710..e23c993 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@
#define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C
-#define RGMII_MODE_EN (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
+#define RGMII_MODE_EN (1 << 6)
#define ID_MODE_DIS (1 << 16)
#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 34a26e4..1e187fb 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2902,7 +2902,7 @@
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
- be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e2..36fc429 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@
if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize ==
- QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ug_info->largestexternallookupkeysize ==
- QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a2db388..ee74f95 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1481,6 +1481,13 @@
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
+ if ((hw->mac.type >= e1000_i210) &&
+ !(igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_pll_workaround_i210(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Initialize identification LED */
ret_val = igb_id_led_init(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35..217f813 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD 0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0x00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
/* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
/* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e4..ce55ea5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f..65d9316 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@
}
return ret_val;
}
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = rd32(E1000_WUC);
+ mdicnfg = rd32(E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ wr32(E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+ igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = 0;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+ ctrl = rd32(E1000_CTRL);
+ wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+
+ wr32(E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ usleep_range(1000, 2000);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+ wr32(E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ wr32(E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976..3442b63 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7..f5ba4e4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f145adb..a9537ba 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7215,6 +7215,20 @@
}
}
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@
if (netif_running(netdev))
igb_close(netdev);
+ else
+ igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca1..dadd9a5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@
command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
- if (l3_proto == swab16(ETH_P_IP))
+ if (l3_proto == htons(ETH_P_IP))
command |= MVNETA_TXD_IP_CSUM;
else
command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@
if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else
+ else if (phydev->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f7252..56022d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@
init_completion(&cq->free);
cq->irq = priv->eq_table.eq[cq->vector].irq;
- cq->irq_affinity_change = false;
-
return 0;
err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b21307..82322b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,11 +128,16 @@
mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
+
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
mdev->dev->caps.num_comp_vectors;
}
+
+ cq->irq_desc =
+ irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+ cq->vector));
} else {
/* For TX we use the same irq per
ring we assigned for the RX */
@@ -187,8 +192,6 @@
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector) {
- if (!cq->is_tx)
- irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
}
cq->vector = 0;
@@ -204,6 +207,7 @@
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
+ irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069..68d763d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@
coal->tx_coalesce_usecs = priv->tx_usecs;
coal->tx_max_coalesced_frames = priv->tx_frames;
+ coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames;
@@ -426,6 +428,7 @@
coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
return 0;
}
@@ -434,6 +437,9 @@
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ if (!coal->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+ priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
return mlx4_en_moderation_update(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d4fb7b..7345c43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2336,7 +2336,7 @@
struct mlx4_en_priv *priv = netdev_priv(dev);
__be16 current_port;
- if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
+ priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d2d4157..5535862 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/irq.h>
#include "mlx4_en.h"
@@ -782,6 +783,7 @@
PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
+ skb_mark_napi_id(gro_skb, &cq->napi);
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
+ int cpu_curr;
+ const struct cpumask *aff;
+
INC_PERF_COUNTER(priv->pstats.napi_quota);
- if (unlikely(cq->mcq.irq_affinity_change)) {
- cq->mcq.irq_affinity_change = false;
+
+ cpu_curr = smp_processor_id();
+ aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+ if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+ /* Current cpu is not set in the IRQ affinity mask -
+ * the affinity probably changed. Need to stop this NAPI
+ * poll and restart it on the right CPU.
+ */
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return 0;
}
} else {
/* Done for now */
- cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483..5045bab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@
return cnt;
}
-static int mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq,
- int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@
int factor = priv->cqe_factor;
u64 timestamp = 0;
int done = 0;
+ int budget = priv->tx_work_limit;
if (!priv->port_up)
- return 0;
+ return true;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
- return done;
+ return done < budget;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- int done;
+ int clean_complete;
- done = mlx4_en_process_tx_cq(dev, cq, budget);
+ clean_complete = mlx4_en_process_tx_cq(dev, cq);
+ if (!clean_complete)
+ return budget;
- /* If we used up all the quota - we're probably not done yet... */
- if (done < budget) {
- /* Done for now */
- cq->mcq.irq_affinity_change = false;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return done;
- } else if (unlikely(cq->mcq.irq_affinity_change)) {
- cq->mcq.irq_affinity_change = false;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return 0;
- }
- return budget;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+
+ return 0;
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1..2a004b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@
MLX4_EQ_ENTRY_SIZE = 0x20
};
-struct mlx4_irq_notify {
- void *arg;
- struct irq_affinity_notify notify;
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -1088,57 +1083,6 @@
iounmap(priv->clr_base);
}
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct mlx4_irq_notify *n = container_of(notify,
- struct mlx4_irq_notify,
- notify);
- struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
- struct radix_tree_iter iter;
- void **slot;
-
- radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
- struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
- if (cq->irq == notify->irq)
- cq->irq_affinity_change = true;
- }
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
- struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
- notify.kref);
- kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
- struct mlx4_dev *dev, int irq)
-{
- struct mlx4_irq_notify *irq_notifier = NULL;
- int err = 0;
-
- irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
- if (!irq_notifier) {
- mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
- irq);
- return;
- }
-
- irq_notifier->notify.irq = irq;
- irq_notifier->notify.notify = mlx4_irq_notifier_notify;
- irq_notifier->notify.release = mlx4_release_irq_notifier;
- irq_notifier->arg = priv;
- err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
- if (err) {
- kfree(irq_notifier);
- irq_notifier = NULL;
- mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
- }
-}
-
-
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@
continue;
/*we dont want to break here*/
}
- mlx4_assign_irq_notifier(priv, dev,
- priv->eq_table.eq[vec].irq);
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
@@ -1427,6 +1369,14 @@
}
EXPORT_SYMBOL(mlx4_assign_eq);
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
- irq_set_affinity_notifier(
- priv->eq_table.eq[vec].irq,
- NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 0e15295..d72a5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
+#define MLX4_EN_DEFAULT_TX_WORK 256
+
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
@@ -343,6 +345,7 @@
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@
__be32 ctrl_flags;
u32 flags;
u8 num_tx_rings_p_up;
+ u32 tx_work_limit;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ba0401d4..184c361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -94,6 +94,11 @@
write_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
write_unlock_irq(&table->lock);
+ if (err) {
+ mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+ mlx5_base_mkey(mr->key), err);
+ mlx5_core_destroy_mkey(dev, mr);
+ }
return err;
}
@@ -104,12 +109,22 @@
struct mlx5_mr_table *table = &dev->priv.mr_table;
struct mlx5_destroy_mkey_mbox_in in;
struct mlx5_destroy_mkey_mbox_out out;
+ struct mlx5_core_mr *deleted_mr;
unsigned long flags;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
+ write_lock_irqsave(&table->lock, flags);
+ deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+ write_unlock_irqrestore(&table->lock, flags);
+ if (!deleted_mr) {
+ mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
+ mlx5_base_mkey(mr->key));
+ return -ENOENT;
+ }
+
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
- write_lock_irqsave(&table->lock, flags);
- radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
- write_unlock_irqrestore(&table->lock, flags);
-
return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index be425ad..61623e9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -538,6 +538,7 @@
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
+ Rdy_to_L23 = (1 << 1), /* L23 Enable */
Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
/* Config4 register */
@@ -4239,6 +4240,8 @@
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4900,21 @@
PCI_EXP_LNKCTL_CLKREQ_EN);
}
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 data;
+
+ data = RTL_R8(Config3);
+
+ if (enable)
+ data |= Rdy_to_L23;
+ else
+ data &= ~Rdy_to_L23;
+
+ RTL_W8(Config3, data);
+}
+
#define R8168_CPCMD_QUIRK_MASK (\
EnableBist | \
Mac_dbgo_oe | \
@@ -5246,6 +5264,7 @@
};
rtl_hw_start_8168f(tp);
+ rtl_pcie_state_l2l3_enable(tp, false);
rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
@@ -5284,6 +5303,8 @@
rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5557,8 @@
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5594,8 @@
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5608,8 @@
RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148e..9d37483 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@
static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
{
- u32 value;
-
- value = readl(ioaddr + GMAC_AN_CTRL);
/* auto negotiation enable and External Loopback enable */
- value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+ u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
if (restart)
value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a..1e2bcf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@
x->rx_msg_type_delay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
x->rx_msg_type_delay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
x->rx_msg_type_pdelay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f..fd411d6 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1083,6 +1083,24 @@
return vp;
}
+static void vnet_cleanup(void)
+{
+ struct vnet *vp;
+ struct net_device *dev;
+
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ dev = vp->dev;
+ /* vio_unregister_driver() should have cleaned up port_list */
+ BUG_ON(!list_empty(&vp->port_list));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
static const char *local_mac_prop = "local-mac-address";
static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@
kfree(port);
- unregister_netdev(vp->dev);
}
return 0;
}
@@ -1268,6 +1285,7 @@
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
+ vnet_cleanup();
}
module_init(vnet_init);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index eb78203..2aa5727 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -291,7 +291,11 @@
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
struct net_device *dev);
@@ -2849,7 +2853,7 @@
* Align an sk_buff to a boundary power of 2
*
*/
-
+#ifdef DYNAMIC_BUFFERS
static void my_skb_align(struct sk_buff *skb, int n)
{
unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@
skb_reserve(skb, v - x);
}
-
+#endif
/*
* ================
@@ -3074,10 +3078,7 @@
break;
}
else {
-#ifndef DYNAMIC_BUFFERS
- if (! rx_in_place)
-#endif
- {
+ if (!rx_in_place) {
/* Receive buffer allocated, pass receive packet up */
skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@
}
}
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
#endif /* DYNAMIC_BUFFERS */
/*
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6a999e6..9408157 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1323,15 +1323,15 @@
{
struct dp83640_private *dp83640 = phydev->priv;
- if (!dp83640->hwts_rx_en)
- return false;
-
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
kfree_skb(skb);
return true;
}
+ if (!dp83640->hwts_rx_en)
+ return false;
+
SKB_PTP_TYPE(skb) = type;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa5..4eaadcf 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of an mdio bus and look for a node that matches the
+ * phy's address with its 'reg' property. If found, set the of_node pointer for
+ * the phy. This allows auto-probed phy devices to be supplied with information
+ * passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *child;
+
+ if (dev->of_node || !mdio->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(mdio->dev.of_node, child) {
+ int addr;
+ int ret;
+
+ ret = of_property_read_u32(child, "reg", &addr);
+ if (ret < 0) {
+ dev_err(dev, "%s has invalid PHY address\n",
+ child->full_name);
+ continue;
+ }
+
+ /* A PHY must have a reg property in the range [0-31] */
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "%s PHY address %i is too large\n",
+ child->full_name, addr);
+ continue;
+ }
+
+ if (addr == phydev->addr) {
+ dev->of_node = child;
+ return;
+ }
+ }
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+}
#endif
/**
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 91d6c12..d5b77ef 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
- int len, err;
+ int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@@ -554,12 +554,6 @@
if (IS_ERR(code))
return PTR_ERR(code);
- err = sk_chk_filter(code, uprog.len);
- if (err) {
- kfree(code);
- return err;
- }
-
*p = code;
return uprog.len;
}
@@ -763,10 +757,15 @@
};
ppp_lock(ppp);
- if (ppp->pass_filter)
+ if (ppp->pass_filter) {
sk_unattached_filter_destroy(ppp->pass_filter);
- err = sk_unattached_filter_create(&ppp->pass_filter,
- &fprog);
+ ppp->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
@@ -784,10 +783,15 @@
};
ppp_lock(ppp);
- if (ppp->active_filter)
+ if (ppp->active_filter) {
sk_unattached_filter_destroy(ppp->active_filter);
- err = sk_unattached_filter_create(&ppp->active_filter,
- &fprog);
+ ppp->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd..6c9c16d 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);
- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a3a0586..a4272ed 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@
* so as not to drop characters on the floor.
*/
int curr_rx_urb_idx;
- u16 curr_rx_urb_offset;
u8 rx_urb_filled[MAX_RX_URBS];
struct tasklet_struct unthrottle_tasklet;
- struct work_struct retry_unthrottle_workqueue;
};
struct hso_device {
@@ -1252,14 +1250,6 @@
tasklet_hi_schedule(&serial->unthrottle_tasklet);
}
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
- struct hso_serial *serial =
- container_of(work, struct hso_serial,
- retry_unthrottle_workqueue);
- hso_unthrottle_tasklet(serial);
-}
-
/* open the requested serial port */
static int hso_serial_open(struct tty_struct *tty, struct file *filp)
{
@@ -1295,8 +1285,6 @@
tasklet_init(&serial->unthrottle_tasklet,
(void (*)(unsigned long))hso_unthrottle_tasklet,
(unsigned long)serial);
- INIT_WORK(&serial->retry_unthrottle_workqueue,
- hso_unthrottle_workfunc);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@
if (!usb_gone)
hso_stop_serial_device(serial->parent);
tasklet_kill(&serial->unthrottle_tasklet);
- cancel_work_sync(&serial->retry_unthrottle_workqueue);
}
if (!usb_gone)
@@ -2013,8 +2000,7 @@
static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
{
struct tty_struct *tty;
- int write_length_remaining = 0;
- int curr_write_len;
+ int count;
/* Sanity check */
if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@
tty = tty_port_tty_get(&serial->port);
- /* Push data to tty */
- write_length_remaining = urb->actual_length -
- serial->curr_rx_urb_offset;
- D1("data to push to tty");
- while (write_length_remaining) {
- if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
- tty_kref_put(tty);
- return -1;
- }
- curr_write_len = tty_insert_flip_string(&serial->port,
- urb->transfer_buffer + serial->curr_rx_urb_offset,
- write_length_remaining);
- serial->curr_rx_urb_offset += curr_write_len;
- write_length_remaining -= curr_write_len;
- tty_flip_buffer_push(&serial->port);
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ tty_kref_put(tty);
+ return -1;
}
+
+ /* Push data to tty */
+ D1("data to push to tty");
+ count = tty_buffer_request_room(&serial->port, urb->actual_length);
+ if (count >= urb->actual_length) {
+ tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+ urb->actual_length);
+ tty_flip_buffer_push(&serial->port);
+ } else {
+ dev_warn(&serial->parent->usb->dev,
+ "dropping data, %d bytes lost\n", urb->actual_length);
+ }
+
tty_kref_put(tty);
- if (write_length_remaining == 0) {
- serial->curr_rx_urb_offset = 0;
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- }
- return write_length_remaining;
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+ return 0;
}
@@ -2217,7 +2202,6 @@
}
}
serial->curr_rx_urb_idx = 0;
- serial->curr_rx_urb_offset = 0;
if (serial->tx_urb)
usb_kill_urb(serial->tx_urb);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13..735f7da 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
/* Terminating entry */
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e..22756db 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -741,6 +742,7 @@
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2543196..7bad2d3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1359,7 +1359,7 @@
struct sk_buff_head seg_list;
struct sk_buff *segs, *nskb;
- features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+ features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs) || !segs)
goto drop;
@@ -3204,8 +3204,13 @@
struct r8152 *tp = netdev_priv(dev);
struct tally_counter tally;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
+ usb_autopm_put_interface(tp->intf);
+
data[0] = le64_to_cpu(tally.tx_packets);
data[1] = le64_to_cpu(tally.rx_packets);
data[2] = le64_to_cpu(tally.tx_errors);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e..d07bf4c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@
return ret;
}
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ int ret;
+
+ ret = smsc95xx_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ return smsc95xx_resume(intf);
+}
+
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@
.probe = usbnet_probe,
.suspend = smsc95xx_suspend,
.resume = smsc95xx_resume,
- .reset_resume = smsc95xx_resume,
+ .reset_resume = smsc95xx_reset_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
.supports_autosuspend = 1,
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace04..1f04127 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@
"FarSync TE1"
};
-static void
+static int
fst_init_card(struct fst_card_info *card)
{
int i;
@@ -2374,24 +2374,21 @@
* we'll have to revise it in some way then.
*/
for (i = 0; i < card->nports; i++) {
- err = register_hdlc_device(card->ports[i].dev);
- if (err < 0) {
- int j;
+ err = register_hdlc_device(card->ports[i].dev);
+ if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
- i, -err);
- for (j = i; j < card->nports; j++) {
- free_netdev(card->ports[j].dev);
- card->ports[j].dev = NULL;
- }
- card->nports = i;
- break;
- }
+ i, -err);
+ while (i--)
+ unregister_hdlc_device(card->ports[i].dev);
+ return err;
+ }
}
pr_info("%s-%s: %s IRQ%d, %d ports\n",
port_to_dev(&card->ports[0])->name,
port_to_dev(&card->ports[card->nports - 1])->name,
type_strings[card->type], card->irq, card->nports);
+ return 0;
}
static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
pr_err("Failed to enable card. Err %d\n", -err);
- kfree(card);
- return err;
+ goto enable_fail;
}
if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
pr_err("Failed to allocate regions. Err %d\n", -err);
- pci_disable_device(pdev);
- kfree(card);
- return err;
+ goto regions_fail;
}
/* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@
card->phys_ctlmem = pci_resource_start(pdev, 3);
if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
pr_err("Physical memory remap failed\n");
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto ioremap_physmem_fail;
}
if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
pr_err("Control memory remap failed\n");
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto ioremap_ctlmem_fail;
}
dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
/* Register the interrupt handler */
if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
pr_err("Unable to register interrupt %d\n", card->irq);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto irq_fail;
}
/* Record info we need */
@@ -2513,13 +2498,8 @@
while (i--)
free_netdev(card->ports[i].dev);
pr_err("FarSync: out of memory\n");
- free_irq(card->irq, card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENOMEM;
+ goto hdlcdev_fail;
}
card->ports[i].dev = dev;
card->ports[i].card = card;
@@ -2565,9 +2545,16 @@
pci_set_drvdata(pdev, card);
/* Remainder of card setup */
+ if (no_of_cards_added >= FST_MAX_CARDS) {
+ pr_err("FarSync: too many cards\n");
+ err = -ENOMEM;
+ goto card_array_fail;
+ }
fst_card_array[no_of_cards_added] = card;
card->card_no = no_of_cards_added++; /* Record instance and bump it */
- fst_init_card(card);
+ err = fst_init_card(card);
+ if (err)
+ goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
/*
* Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@
&card->rx_dma_handle_card);
if (card->rx_dma_handle_host == NULL) {
pr_err("Could not allocate rx dma buffer\n");
- fst_disable_intr(card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto rx_dma_fail;
}
card->tx_dma_handle_host =
pci_alloc_consistent(card->device, FST_MAX_MTU,
&card->tx_dma_handle_card);
if (card->tx_dma_handle_host == NULL) {
pr_err("Could not allocate tx dma buffer\n");
- fst_disable_intr(card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto tx_dma_fail;
}
}
return 0; /* Success */
+
+tx_dma_fail:
+ pci_free_consistent(card->device, FST_MAX_MTU,
+ card->rx_dma_handle_host,
+ card->rx_dma_handle_card);
+rx_dma_fail:
+ fst_disable_intr(card);
+ for (i = 0 ; i < card->nports ; i++)
+ unregister_hdlc_device(card->ports[i].dev);
+init_card_fail:
+ fst_card_array[card->card_no] = NULL;
+card_array_fail:
+ for (i = 0 ; i < card->nports ; i++)
+ free_netdev(card->ports[i].dev);
+hdlcdev_fail:
+ free_irq(card->irq, card);
+irq_fail:
+ iounmap(card->ctlmem);
+ioremap_ctlmem_fail:
+ iounmap(card->mem);
+ioremap_physmem_fail:
+ pci_release_regions(pdev);
+regions_fail:
+ pci_disable_device(pdev);
+enable_fail:
+ kfree(card);
+ return err;
}
/*
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f19..fa9fdfa 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@
{
struct x25_asy *sl = netdev_priv(dev);
unsigned char *xbuff, *rbuff;
- int len = 2 * newmtu;
+ int len;
+ if (newmtu > 65534)
+ return -EINVAL;
+
+ len = 2 * newmtu;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 82017f5..e6c56c5 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -795,7 +795,11 @@
if (status)
goto err_htc_stop;
- ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
+ else
+ ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6c102b1..eebc860 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -312,7 +312,6 @@
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
- bool corrupted = false;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -439,9 +438,6 @@
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- if (msdu_chaining && !last_msdu)
- corrupted = true;
-
if (last_msdu) {
msdu->next = NULL;
break;
@@ -457,20 +453,6 @@
msdu_chaining = -1;
/*
- * Apparently FW sometimes reports weird chained MSDU sequences with
- * more than one rx descriptor. This seems like a bug but needs more
- * analyzing. For the time being fix it by dropping such sequences to
- * avoid blowing up the host system.
- */
- if (corrupted) {
- ath10k_warn("failed to pop chained msdus, dropping\n");
- ath10k_htt_rx_free_msdu_chain(*head_msdu);
- *head_msdu = NULL;
- *tail_msdu = NULL;
- msdu_chaining = -EINVAL;
- }
-
- /*
* Don't refill the ring yet.
*
* First, the elements popped here are still in use - it is not
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 6db51a6..d06fcb0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1184,8 +1184,6 @@
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
bus->ops = &brcmf_usb_bus_ops;
- bus->chip = bus_pub->devid;
- bus->chiprev = bus_pub->chiprev;
bus->proto_type = BRCMF_PROTO_BCDC;
bus->always_use_fws_queue = true;
@@ -1194,6 +1192,9 @@
if (ret)
goto fail;
}
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
+
/* request firmware here */
brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
brcmf_usb_probe_phase2);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6..6dc5dd3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,13 +1068,6 @@
/* recalculate basic rates */
iwl_calc_basic_rates(priv, ctx);
- /*
- * force CTS-to-self frames protection if RTS-CTS is not preferred
- * one aggregation protection method
- */
- if (!priv->hw_params.use_rts_for_aggregation)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 0aa7c00..b1a3332 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -88,6 +88,7 @@
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8b53027..725ba49 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,10 +667,9 @@
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (vif->bss_conf.use_cts_prot) {
+ if (vif->bss_conf.use_cts_prot)
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
- cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
- }
+
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
vif->bss_conf.use_cts_prot,
vif->bss_conf.ht_operation_mode);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7215f59..9bfb906 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -303,6 +303,13 @@
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
}
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
+ !iwlwifi_mod_params.uapsd_disable) {
+ hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
+ hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ }
+
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
@@ -1159,8 +1166,12 @@
bcast_mac = &cmd->macs[mvmvif->id];
- /* enable filtering only for associated stations */
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ /*
+ * enable filtering only for associated stations, but not for P2P
+ * Clients
+ */
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
+ !vif->bss_conf.assoc)
return;
bcast_mac->default_discard = 1;
@@ -1237,10 +1248,6 @@
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
- /* bcast filtering isn't supported for P2P client */
- if (vif->p2p)
- return 0;
-
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 4b6c7d4..eac2b42 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -588,9 +588,7 @@
struct iwl_scan_offload_cmd *scan,
struct iwl_mvm_scan_params *params)
{
- scan->channel_count =
- mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
- mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ scan->channel_count = req->n_channels;
scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@
struct cfg80211_sched_scan_request *req,
struct iwl_scan_channel_cfg *channels,
enum ieee80211_band band,
- int *head, int *tail,
+ int *head,
u32 ssid_bitmap,
struct iwl_mvm_scan_params *params)
{
- struct ieee80211_supported_band *s_band;
- int n_channels = req->n_channels;
- int i, j, index = 0;
- bool partial;
+ int i, index = 0;
- /*
- * We have to configure all supported channels, even if we don't want to
- * scan on them, but we have to send channels in the order that we want
- * to scan. So add requested channels to head of the list and others to
- * the end.
- */
- s_band = &mvm->nvm_data->bands[band];
+ for (i = 0; i < req->n_channels; i++) {
+ struct ieee80211_channel *chan = req->channels[i];
- for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
- partial = false;
- for (j = 0; j < n_channels; j++)
- if (s_band->channels[i].center_freq ==
- req->channels[j]->center_freq) {
- index = *head;
- (*head)++;
- /*
- * Channels that came with the request will be
- * in partial scan .
- */
- partial = true;
- break;
- }
- if (!partial) {
- index = *tail;
- (*tail)--;
- }
- channels->channel_number[index] =
- cpu_to_le16(ieee80211_frequency_to_channel(
- s_band->channels[i].center_freq));
+ if (chan->band != band)
+ continue;
+
+ index = *head;
+ (*head)++;
+
+ channels->channel_number[index] = cpu_to_le16(chan->hw_value);
channels->dwell_time[index][0] = params->dwell[band].active;
channels->dwell_time[index][1] = params->dwell[band].passive;
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
- if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
+ if (!(chan->flags & IEEE80211_CHAN_NO_IR))
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
channels->type[index] |=
- cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
- if (partial)
- channels->type[index] |=
- cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
+ IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
- if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ if (chan->flags & IEEE80211_CHAN_NO_HT40)
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
@@ -740,7 +714,6 @@
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
- int tail = band_2ghz + band_5ghz - 1;
u32 ssid_bitmap;
int cmd_len;
int ret;
@@ -772,7 +745,7 @@
&scan_cfg->scan_cmd.tx_cmd[0],
scan_cfg->data);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
- IEEE80211_BAND_2GHZ, &head, &tail,
+ IEEE80211_BAND_2GHZ, &head,
ssid_bitmap, &params);
}
if (band_5ghz) {
@@ -782,7 +755,7 @@
scan_cfg->data +
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
- IEEE80211_BAND_5GHZ, &head, &tail,
+ IEEE80211_BAND_5GHZ, &head,
ssid_bitmap, &params);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7091a18..98950e4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,6 +367,7 @@
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 5b32106..fe0f66f 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -185,6 +185,7 @@
skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
+ memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
tx_info_aggr->bss_type = tx_info_src->bss_type;
tx_info_aggr->bss_num = tx_info_src->bss_num;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e95dec9..b511613 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -220,6 +220,7 @@
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = pkt_len;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8dee6c8..c161141 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -453,6 +453,7 @@
if (skb) {
rx_info = MWIFIEX_SKB_RXCB(skb);
+ memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index cbabc12..e91cd0f 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -645,6 +645,7 @@
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = skb->len;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5fce7e7..70eb863 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -150,6 +150,7 @@
return -1;
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e73034f..0e88364 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -605,6 +605,7 @@
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
@@ -760,6 +761,7 @@
skb->priority = MWIFIEX_PRIO_VI;
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 37f26af..fd7e5b9 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -55,6 +55,7 @@
return -1;
}
+ memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 9a56bc6..b0601b9 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -175,6 +175,7 @@
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index e11dab2..832006b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -231,9 +231,12 @@
*/
static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
{
- __le32 reg;
+ __le32 *reg;
u32 fw_mode;
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (reg == NULL)
+ return -ENOMEM;
/* cannot use rt2x00usb_register_read here as it uses different
* mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
* magic value USB_MODE_AUTORUN (0x11) to the device, thus the
@@ -241,8 +244,9 @@
*/
rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
- &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
- fw_mode = le32_to_cpu(reg);
+ reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+ fw_mode = le32_to_cpu(*reg);
+ kfree(reg);
if ((fw_mode & 0x00000003) == 2)
return 1;
@@ -261,6 +265,7 @@
int status;
u32 offset;
u32 length;
+ int retval;
/*
* Check which section of the firmware we need.
@@ -278,7 +283,10 @@
/*
* Write firmware to device.
*/
- if (rt2800usb_autorun_detect(rt2x00dev)) {
+ retval = rt2800usb_autorun_detect(rt2x00dev);
+ if (retval < 0)
+ return retval;
+ if (retval) {
rt2x00_info(rt2x00dev,
"Firmware loading not required - NIC in AutoRun mode\n");
} else {
@@ -763,7 +771,12 @@
*/
static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
- if (rt2800usb_autorun_detect(rt2x00dev))
+ int retval;
+
+ retval = rt2800usb_autorun_detect(rt2x00dev);
+ if (retval < 0)
+ return retval;
+ if (retval)
return 1;
return rt2800_efuse_detect(rt2x00dev);
}
@@ -772,7 +785,10 @@
{
int retval;
- if (rt2800usb_efuse_detect(rt2x00dev))
+ retval = rt2800usb_efuse_detect(rt2x00dev);
+ if (retval < 0)
+ return retval;
+ if (retval)
retval = rt2800_read_eeprom_efuse(rt2x00dev);
else
retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47..c65b636 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ /* This always points to the shinfo of the skb being checked, which
+ * could be either the first or the one on the frag_list
+ */
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ /* If this is non-NULL, we are currently checking the frag_list skb, and
+ * this points to the shinfo of the first one
+ */
+ struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
+ const bool sharedslot = nr_frags &&
+ frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
int i, err;
- struct sk_buff *first_skb = NULL;
/* Check status of header. */
err = (*gopp_copy)->status;
- (*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+ /* The first frag might still have this slot mapped */
+ if (!sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
}
+ (*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
- if (unlikely(err))
+ if (unlikely(err)) {
xenvif_idx_unmap(queue, pending_idx);
+ /* If the mapping of the first frag was OK, but
+ * the header's copy failed, and they are
+ * sharing a slot, send an error
+ */
+ if (i == 0 && sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
+ else
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
continue;
}
@@ -1075,42 +1097,53 @@
gop_map->status,
pending_idx,
gop_map->ref);
+
xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
- /* First error: invalidate preceding fragments. */
+
+ /* First error: if the header hasn't shared a slot with the
+ * first frag, release it as well.
+ */
+ if (!sharedslot)
+ xenvif_idx_release(queue,
+ XENVIF_TX_CB(skb)->pending_idx,
+ XEN_NETIF_RSP_OKAY);
+
+ /* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
+
+ /* And if we found the error while checking the frag_list, unmap
+ * the first skb's frags
+ */
+ if (first_shinfo) {
+ for (j = 0; j < first_shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+ xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
- if (skb_has_frag_list(skb)) {
- first_skb = skb;
- skb = shinfo->frag_list;
- shinfo = skb_shinfo(skb);
+ if (skb_has_frag_list(skb) && !first_shinfo) {
+ first_shinfo = skb_shinfo(skb);
+ shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
nr_frags = shinfo->nr_frags;
goto check_frags;
}
- /* There was a mapping error in the frag_list skb. We have to unmap
- * the first skb's frags
- */
- if (first_skb && err) {
- int j;
- shinfo = skb_shinfo(first_skb);
- for (j = 0; j < shinfo->nr_frags; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(queue, pending_idx);
- }
- }
-
*gopp_map = gop_map;
return err;
}
@@ -1518,7 +1551,16 @@
/* Check the remap error code. */
if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+ /* If there was an error, xenvif_tx_check_gop is
+ * expected to release all the frags which were mapped,
+ * so kfree_skb shouldn't do it again
+ */
skb_shinfo(skb)->nr_frags = 0;
+ if (skb_has_frag_list(skb)) {
+ struct sk_buff *nskb =
+ skb_shinfo(skb)->frag_list;
+ skb_shinfo(nskb)->nr_frags = 0;
+ }
kfree_skb(skb);
continue;
}
@@ -1822,8 +1864,6 @@
tx_unmap_op.status);
BUG();
}
-
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}
static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a0..055222b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@
unsigned int i = 0;
unsigned int num_queues = info->netdev->real_num_tx_queues;
+ netif_carrier_off(info->netdev);
+
for (i = 0; i < num_queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
- /* Stop old i/f to prevent errors whilst we rebuild the state. */
- spin_lock_bh(&queue->rx_lock);
- spin_lock_irq(&queue->tx_lock);
- netif_carrier_off(queue->info->netdev);
- spin_unlock_irq(&queue->tx_lock);
- spin_unlock_bh(&queue->rx_lock);
-
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@
queue->tx_evtchn = queue->rx_evtchn = 0;
queue->tx_irq = queue->rx_irq = 0;
+ napi_synchronize(&queue->napi);
+
/* End access and free the pages */
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@
/* By now, the queue structures have been set up */
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
- spin_lock_bh(&queue->rx_lock);
- spin_lock_irq(&queue->tx_lock);
/* Step 1: Discard all pending TX packet fragments. */
+ spin_lock_irq(&queue->tx_lock);
xennet_release_tx_bufs(queue);
+ spin_unlock_irq(&queue->tx_lock);
/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ spin_lock_bh(&queue->rx_lock);
+
for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
skb_frag_t *frag;
const struct page *page;
@@ -2076,6 +2075,8 @@
}
queue->rx.req_prod_pvt = requeue_idx;
+
+ spin_unlock_bh(&queue->rx_lock);
}
/*
@@ -2087,13 +2088,17 @@
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
+
notify_remote_via_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
- xennet_tx_buf_gc(queue);
- xennet_alloc_rx_buffers(queue);
+ spin_lock_irq(&queue->tx_lock);
+ xennet_tx_buf_gc(queue);
spin_unlock_irq(&queue->tx_lock);
+
+ spin_lock_bh(&queue->rx_lock);
+ xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index a3bf212..401b245 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,40 +182,6 @@
}
EXPORT_SYMBOL(of_mdiobus_register);
-/**
- * of_mdiobus_link_phydev - Find a device node for a phy
- * @mdio: pointer to mii_bus structure
- * @phydev: phydev for which the of_node pointer should be set
- *
- * Walk the list of subnodes of a mdio bus and look for a node that matches the
- * phy's address with its 'reg' property. If found, set the of_node pointer for
- * the phy. This allows auto-probed pyh devices to be supplied with information
- * passed in via DT.
- */
-void of_mdiobus_link_phydev(struct mii_bus *mdio,
- struct phy_device *phydev)
-{
- struct device *dev = &phydev->dev;
- struct device_node *child;
-
- if (dev->of_node || !mdio->dev.of_node)
- return;
-
- for_each_available_child_of_node(mdio->dev.of_node, child) {
- int addr;
-
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0)
- continue;
-
- if (addr == phydev->addr) {
- dev->of_node = child;
- return;
- }
- }
-}
-EXPORT_SYMBOL(of_mdiobus_link_phydev);
-
/* Helper function for of_phy_find_device */
static int of_phy_match(struct device *dev, void *phy_np)
{
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece..44333bd 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
# Parport configuration.
#
+config ARCH_MIGHT_HAVE_PC_PARPORT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might have PC parallel port hardware.
+
menuconfig PARPORT
tristate "Parallel port support"
depends on HAS_IOMEM
@@ -31,12 +37,6 @@
If unsure, say Y.
-config ARCH_MIGHT_HAVE_PC_PARPORT
- bool
- help
- Select this config option from the architecture Kconfig if
- the architecture might have PC parallel port hardware.
-
if PARPORT
config PARPORT_PC
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 602d153..70741c8 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -80,8 +80,9 @@
return NULL;
context->refcount = 1;
- acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
- acpiphp_post_dock_fixup);
+ context->hp.notify = acpiphp_hotplug_notify;
+ context->hp.fixup = acpiphp_post_dock_fixup;
+ acpi_set_hp_context(adev, &context->hp);
return context;
}
@@ -369,20 +370,6 @@
return AE_OK;
}
-static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
-{
- struct acpiphp_bridge *bridge = NULL;
-
- acpi_lock_hp_context();
- if (adev->hp) {
- bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
- if (bridge)
- get_bridge(bridge);
- }
- acpi_unlock_hp_context();
- return bridge;
-}
-
static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
@@ -753,9 +740,15 @@
void acpiphp_check_host_bridge(struct acpi_device *adev)
{
- struct acpiphp_bridge *bridge;
+ struct acpiphp_bridge *bridge = NULL;
- bridge = acpiphp_dev_to_bridge(adev);
+ acpi_lock_hp_context();
+ if (adev->hp) {
+ bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
+ if (bridge)
+ get_bridge(bridge);
+ }
+ acpi_unlock_hp_context();
if (bridge) {
pci_lock_rescan_remove();
@@ -884,7 +877,7 @@
goto err;
root_context->root_bridge = bridge;
- acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL);
+ acpi_set_hp_context(adev, &root_context->hp);
} else {
struct acpiphp_context *context;
@@ -927,7 +920,7 @@
kfree(bridge);
}
-void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
+static void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
{
if (pci_is_root_bus(bridge->pci_bus)) {
struct acpiphp_root_context *root_context;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ca4927b..37263b0 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,31 +18,31 @@
#include "pci.h"
/**
- * pci_acpi_wake_bus - Wake-up notification handler for root buses.
- * @handle: ACPI handle of a device the notification is for.
- * @event: Type of the signaled event.
- * @context: PCI root bus to wake up devices on.
+ * pci_acpi_wake_bus - Root bus wakeup notification work function.
+ * @work: Work item to handle.
*/
-static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context)
+static void pci_acpi_wake_bus(struct work_struct *work)
{
- struct pci_bus *pci_bus = context;
+ struct acpi_device *adev;
+ struct acpi_pci_root *root;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus)
- pci_pme_wakeup_bus(pci_bus);
+ adev = container_of(work, struct acpi_device, wakeup.context.work);
+ root = acpi_driver_data(adev);
+ pci_pme_wakeup_bus(root->bus);
}
/**
- * pci_acpi_wake_dev - Wake-up notification handler for PCI devices.
+ * pci_acpi_wake_dev - PCI device wakeup notification work function.
* @handle: ACPI handle of a device the notification is for.
- * @event: Type of the signaled event.
- * @context: PCI device object to wake up.
+ * @work: Work item to handle.
*/
-static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
+static void pci_acpi_wake_dev(struct work_struct *work)
{
- struct pci_dev *pci_dev = context;
+ struct acpi_device_wakeup_context *context;
+ struct pci_dev *pci_dev;
- if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
- return;
+ context = container_of(work, struct acpi_device_wakeup_context, work);
+ pci_dev = to_pci_dev(context->dev);
if (pci_dev->pme_poll)
pci_dev->pme_poll = false;
@@ -65,23 +65,12 @@
}
/**
- * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
- * @dev: ACPI device to add the notifier for.
- * @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
+ * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
+ * @dev: PCI root bridge ACPI device.
*/
-acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
- struct pci_bus *pci_bus)
+acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
- return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
-}
-
-/**
- * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier.
- * @dev: ACPI device to remove the notifier from.
- */
-acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
-{
- return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus);
+ return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}
/**
@@ -92,16 +81,7 @@
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev)
{
- return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
-}
-
-/**
- * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier.
- * @dev: ACPI device to remove the notifier from.
- */
-acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
-{
- return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev);
+ return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
@@ -170,14 +150,13 @@
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
-
- return handle ? acpi_bus_power_manageable(handle) : false;
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ return adev ? acpi_device_power_manageable(adev) : false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
@@ -188,7 +167,7 @@
int error = -EINVAL;
/* If the ACPI device has _EJ0, ignore the device */
- if (!handle || acpi_has_method(handle, "_EJ0"))
+ if (!adev || acpi_has_method(adev->handle, "_EJ0"))
return -ENODEV;
switch (state) {
@@ -202,7 +181,7 @@
case PCI_D1:
case PCI_D2:
case PCI_D3hot:
- error = acpi_bus_set_power(handle, state_conv[state]);
+ error = acpi_device_set_power(adev, state_conv[state]);
}
if (!error)
@@ -214,9 +193,8 @@
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
-
- return handle ? acpi_bus_can_wakeup(handle) : false;
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ return adev ? acpi_device_can_wakeup(adev) : false;
}
static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc9..9f43916 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@
status = readl(info->irqmux_base);
- for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+ for_each_set_bit(n, &status, info->nbanks)
__gpio_irq_handler(&info->banks[n]);
chained_irq_exit(chip, desc);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b81448b..d2b780a 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -67,8 +67,8 @@
pnp_dbg(&dev->dev, "set resources\n");
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ acpi_dev = ACPI_COMPANION(&dev->dev);
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
@@ -76,6 +76,7 @@
if (WARN_ON_ONCE(acpi_dev != dev->data))
dev->data = acpi_dev;
+ handle = acpi_dev->handle;
if (acpi_has_method(handle, METHOD_NAME__SRS)) {
struct acpi_buffer buffer;
@@ -93,8 +94,8 @@
}
kfree(buffer.pointer);
}
- if (!ret && acpi_bus_power_manageable(handle))
- ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
+ if (!ret && acpi_device_power_manageable(acpi_dev))
+ ret = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
return ret;
}
@@ -102,23 +103,22 @@
static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
- acpi_handle handle;
acpi_status status;
dev_dbg(&dev->dev, "disable resources\n");
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ acpi_dev = ACPI_COMPANION(&dev->dev);
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
- if (acpi_bus_power_manageable(handle))
- acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
+ if (acpi_device_power_manageable(acpi_dev))
+ acpi_device_set_power(acpi_dev, ACPI_STATE_D3_COLD);
- /* continue even if acpi_bus_set_power() fails */
- status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+ /* continue even if acpi_device_set_power() fails */
+ status = acpi_evaluate_object(acpi_dev->handle, "_DIS", NULL, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
return -ENODEV;
@@ -128,26 +128,22 @@
#ifdef CONFIG_ACPI_SLEEP
static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return false;
}
- return acpi_bus_can_wakeup(handle);
+ return acpi_bus_can_wakeup(acpi_dev->handle);
}
static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
int error = 0;
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
@@ -159,7 +155,7 @@
return error;
}
- if (acpi_bus_power_manageable(handle)) {
+ if (acpi_device_power_manageable(acpi_dev)) {
int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
ACPI_STATE_D3_COLD);
if (power_state < 0)
@@ -167,12 +163,12 @@
ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
/*
- * acpi_bus_set_power() often fails (keyboard port can't be
+ * acpi_device_set_power() can fail (keyboard port can't be
* powered-down?), and in any case, our return value is ignored
* by pnp_bus_suspend(). Hence we don't revert the wakeup
* setting if the set_power fails.
*/
- error = acpi_bus_set_power(handle, power_state);
+ error = acpi_device_set_power(acpi_dev, power_state);
}
return error;
@@ -180,11 +176,10 @@
static int pnpacpi_resume(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
int error = 0;
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
@@ -192,8 +187,8 @@
if (device_may_wakeup(&dev->dev))
acpi_pm_device_sleep_wake(&dev->dev, false);
- if (acpi_bus_power_manageable(handle))
- error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+ if (acpi_device_power_manageable(acpi_dev))
+ error = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
return error;
}
@@ -295,9 +290,11 @@
return error;
}
+ error = acpi_bind_one(&dev->dev, device);
+
num++;
- return 0;
+ return error;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
@@ -313,41 +310,6 @@
return AE_OK;
}
-static int __init acpi_pnp_match(struct device *dev, void *_pnp)
-{
- struct acpi_device *acpi = to_acpi_device(dev);
- struct pnp_dev *pnp = _pnp;
-
- /* true means it matched */
- return !acpi->physical_node_count
- && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
-}
-
-static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
-{
- dev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev),
- acpi_pnp_match);
- if (!dev)
- return NULL;
-
- put_device(dev);
- return to_acpi_device(dev);
-}
-
-/* complete initialization of a PNPACPI device includes having
- * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
- */
-static bool acpi_pnp_bus_match(struct device *dev)
-{
- return dev->bus == &pnp_bus_type;
-}
-
-static struct acpi_bus_type __initdata acpi_pnp_bus = {
- .name = "PNP",
- .match = acpi_pnp_bus_match,
- .find_companion = acpi_pnp_find_companion,
-};
-
int pnpacpi_disabled __initdata;
static int __init pnpacpi_init(void)
{
@@ -357,10 +319,8 @@
}
printk(KERN_INFO "pnp: PnP ACPI init\n");
pnp_register_protocol(&pnpacpi_protocol);
- register_acpi_bus_type(&acpi_pnp_bus);
acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL);
printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num);
- unregister_acpi_bus_type(&acpi_pnp_bus);
pnp_platform_devices = 1;
return 0;
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459..220acb4 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@
} else
raw3270_writesf_readpart(rp);
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
- memset(&rp->init_data, 0, sizeof(rp->init_data));
}
static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8..4038437 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@
int rc;
ap_dev->drv = ap_drv;
+
+ spin_lock_bh(&ap_device_list_lock);
+ list_add(&ap_dev->list, &ap_device_list);
+ spin_unlock_bh(&ap_device_list_lock);
+
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
- if (!rc) {
+ if (rc) {
spin_lock_bh(&ap_device_list_lock);
- list_add(&ap_dev->list, &ap_device_list);
+ list_del_init(&ap_dev->list);
spin_unlock_bh(&ap_device_list_lock);
}
return rc;
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba..8afc6fe 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_OMAP4
bool "OMAP 4 Camera support"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+ depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
select VIDEOBUF2_DMA_CONTIG
---help---
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9d2b673..b8125aa 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1169,8 +1169,8 @@
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
cap |= QH_IOS;
- if (hwep->num)
- cap |= QH_ZLT;
+
+ cap |= QH_ZLT;
cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
/*
* For ISO-TX, we set mult at QH as the largest value, and use
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21b99b4..0e950ad 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -889,6 +889,25 @@
if (!hub_is_superspeed(hub->hdev))
return -EINVAL;
+ ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The AMD FCH USB XHCI Controller [1022:7814] returns spurious results
+ * if we set the USB 3.0 port link state to Disabled: a subsequently
+ * hotplugged USB 3.0 device gets routed to the 2.0 root hub and is
+ * recognized as a high-speed device. Since the port is already in the
+ * USB_SS_PORT_LS_RX_DETECT state, check the state here to avoid the
+ * bug.
+ */
+ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_RX_DETECT) {
+ dev_dbg(&hub->ports[port1 - 1]->dev,
+ "Not disabling port; link state is RxDetect\n");
+ return ret;
+ }
+
ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
if (ret)
return ret;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b7a506f..5c660c7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -426,20 +426,18 @@
* p2m are consistent.
*/
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long p;
- struct page *scratch_page = get_balloon_scratch_page();
-
if (!PageHighMem(page)) {
+ struct page *scratch_page = get_balloon_scratch_page();
+
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte(page_to_pfn(scratch_page),
PAGE_KERNEL_RO), 0);
BUG_ON(ret);
- }
- p = page_to_pfn(scratch_page);
- __set_phys_to_machine(pfn, pfn_to_mfn(p));
- put_balloon_scratch_page();
+ put_balloon_scratch_page();
+ }
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
#endif
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c3667b2..5f1e1f3 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -88,7 +88,6 @@
if (!si->cancelled) {
xen_irq_resume();
- xen_console_resume();
xen_timer_resume();
}
@@ -135,6 +134,10 @@
err = stop_machine(xen_suspend, &si, cpumask_of(0));
+ /* Resume console as early as possible. */
+ if (!si.cancelled)
+ xen_console_resume();
+
raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/fs/aio.c b/fs/aio.c
index 955947e..1c9c5f0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
struct kioctx_cpu *kcpu;
+ unsigned long flags;
preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu);
+ local_irq_save(flags);
kcpu->reqs_available += nr;
+
while (kcpu->reqs_available >= ctx->req_batch * 2) {
kcpu->reqs_available -= ctx->req_batch;
atomic_add(ctx->req_batch, &ctx->reqs_available);
}
+ local_irq_restore(flags);
preempt_enable();
}
@@ -847,10 +851,12 @@
{
struct kioctx_cpu *kcpu;
bool ret = false;
+ unsigned long flags;
preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu);
+ local_irq_save(flags);
if (!kcpu->reqs_available) {
int old, avail = atomic_read(&ctx->reqs_available);
@@ -869,6 +875,7 @@
ret = true;
kcpu->reqs_available--;
out:
+ local_irq_restore(flags);
preempt_enable();
return ret;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c..7187b14 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@
log_list);
list_del_init(&ordered->log_list);
spin_unlock_irq(&log->log_extents_lock[index]);
+
+ if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+ !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+ struct inode *inode = ordered->inode;
+ u64 start = ordered->file_offset;
+ u64 end = ordered->file_offset + ordered->len - 1;
+
+ WARN_ON(!inode);
+ filemap_fdatawrite_range(inode->i_mapping, start, end);
+ }
wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
&ordered->flags));
+
btrfs_put_ordered_extent(ordered);
spin_lock_irq(&log->log_extents_lock[index]);
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6104676..6cb82f6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1680,11 +1680,11 @@
if (device->bdev == root->fs_info->fs_devices->latest_bdev)
root->fs_info->fs_devices->latest_bdev = next_device->bdev;
- if (device->bdev)
+ if (device->bdev) {
device->fs_devices->open_devices--;
-
- /* remove sysfs entry */
- btrfs_kobj_rm_device(root->fs_info, device);
+ /* remove sysfs entry */
+ btrfs_kobj_rm_device(root->fs_info, device);
+ }
call_rcu(&device->rcu, free_device);
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528f..a93f7e6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@
if (unlikely(nr < 0))
return nr;
- tsk->flags = PF_DUMPCORE;
+ tsk->flags |= PF_DUMPCORE;
if (atomic_read(&mm->mm_users) == nr + 1)
goto done;
/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba..194d0d1 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -198,9 +198,8 @@
* L1 cache.
*/
static inline struct page *dio_get_page(struct dio *dio,
- struct dio_submit *sdio, size_t *from, size_t *to)
+ struct dio_submit *sdio)
{
- int n;
if (dio_pages_present(sdio) == 0) {
int ret;
@@ -209,10 +208,7 @@
return ERR_PTR(ret);
BUG_ON(dio_pages_present(sdio) == 0);
}
- n = sdio->head++;
- *from = n ? 0 : sdio->from;
- *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
- return dio->pages[n];
+ return dio->pages[sdio->head];
}
/**
@@ -911,11 +907,15 @@
while (sdio->block_in_file < sdio->final_block_in_request) {
struct page *page;
size_t from, to;
- page = dio_get_page(dio, sdio, &from, &to);
+
+ page = dio_get_page(dio, sdio);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
+ from = sdio->head ? 0 : sdio->from;
+ to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+ sdio->head++;
while (from < to) {
unsigned this_chunk_bytes; /* # of bytes mapped */
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97b..ca88731 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@
unsigned long seglen;
unsigned long addr;
struct page *pg;
- void *mapaddr;
- void *buf;
unsigned len;
+ unsigned offset;
unsigned move_pages:1;
};
@@ -666,23 +665,17 @@
if (cs->currbuf) {
struct pipe_buffer *buf = cs->currbuf;
- if (!cs->write) {
- kunmap_atomic(cs->mapaddr);
- } else {
- kunmap_atomic(cs->mapaddr);
+ if (cs->write)
buf->len = PAGE_SIZE - cs->len;
- }
cs->currbuf = NULL;
- cs->mapaddr = NULL;
- } else if (cs->mapaddr) {
- kunmap_atomic(cs->mapaddr);
+ } else if (cs->pg) {
if (cs->write) {
flush_dcache_page(cs->pg);
set_page_dirty_lock(cs->pg);
}
put_page(cs->pg);
- cs->mapaddr = NULL;
}
+ cs->pg = NULL;
}
/*
@@ -691,7 +684,7 @@
*/
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
- unsigned long offset;
+ struct page *page;
int err;
unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@
BUG_ON(!cs->nr_segs);
cs->currbuf = buf;
- cs->mapaddr = kmap_atomic(buf->page);
+ cs->pg = buf->page;
+ cs->offset = buf->offset;
cs->len = buf->len;
- cs->buf = cs->mapaddr + buf->offset;
cs->pipebufs++;
cs->nr_segs--;
} else {
- struct page *page;
-
if (cs->nr_segs == cs->pipe->buffers)
return -EIO;
@@ -726,8 +717,8 @@
buf->len = 0;
cs->currbuf = buf;
- cs->mapaddr = kmap_atomic(page);
- cs->buf = cs->mapaddr;
+ cs->pg = page;
+ cs->offset = 0;
cs->len = PAGE_SIZE;
cs->pipebufs++;
cs->nr_segs++;
@@ -740,14 +731,13 @@
cs->iov++;
cs->nr_segs--;
}
- err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+ err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
if (err < 0)
return err;
BUG_ON(err != 1);
- offset = cs->addr % PAGE_SIZE;
- cs->mapaddr = kmap_atomic(cs->pg);
- cs->buf = cs->mapaddr + offset;
- cs->len = min(PAGE_SIZE - offset, cs->seglen);
+ cs->pg = page;
+ cs->offset = cs->addr % PAGE_SIZE;
+ cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
cs->seglen -= cs->len;
cs->addr += cs->len;
}
@@ -760,15 +750,20 @@
{
unsigned ncpy = min(*size, cs->len);
if (val) {
+ void *pgaddr = kmap_atomic(cs->pg);
+ void *buf = pgaddr + cs->offset;
+
if (cs->write)
- memcpy(cs->buf, *val, ncpy);
+ memcpy(buf, *val, ncpy);
else
- memcpy(*val, cs->buf, ncpy);
+ memcpy(*val, buf, ncpy);
+
+ kunmap_atomic(pgaddr);
*val += ncpy;
}
*size -= ncpy;
cs->len -= ncpy;
- cs->buf += ncpy;
+ cs->offset += ncpy;
return ncpy;
}
@@ -874,8 +869,8 @@
out_fallback_unlock:
unlock_page(newpage);
out_fallback:
- cs->mapaddr = kmap_atomic(buf->page);
- cs->buf = cs->mapaddr + buf->offset;
+ cs->pg = buf->page;
+ cs->offset = buf->offset;
err = lock_request(cs->fc, cs->req);
if (err)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 4219835..0c60482 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@
inode = ACCESS_ONCE(entry->d_inode);
if (inode && is_bad_inode(inode))
goto invalid;
- else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+ else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+ (flags & LOOKUP_REVAL)) {
int err;
struct fuse_entry_out outarg;
struct fuse_req *req;
@@ -814,13 +815,6 @@
return err;
}
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
- struct inode *newdir, struct dentry *newent)
-{
- return fuse_rename_common(olddir, oldent, newdir, newent, 0,
- FUSE_RENAME, sizeof(struct fuse_rename_in));
-}
-
static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
struct inode *newdir, struct dentry *newent,
unsigned int flags)
@@ -831,17 +825,30 @@
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
- if (fc->no_rename2 || fc->minor < 23)
- return -EINVAL;
+ if (flags) {
+ if (fc->no_rename2 || fc->minor < 23)
+ return -EINVAL;
- err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
- FUSE_RENAME2, sizeof(struct fuse_rename2_in));
- if (err == -ENOSYS) {
- fc->no_rename2 = 1;
- err = -EINVAL;
+ err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ FUSE_RENAME2,
+ sizeof(struct fuse_rename2_in));
+ if (err == -ENOSYS) {
+ fc->no_rename2 = 1;
+ err = -EINVAL;
+ }
+ } else {
+ err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ FUSE_RENAME,
+ sizeof(struct fuse_rename_in));
}
- return err;
+ return err;
+}
+
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+ struct inode *newdir, struct dentry *newent)
+{
+ return fuse_rename2(olddir, oldent, newdir, newent, 0);
}
static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@
int err;
bool r;
- if (fi->i_time < get_jiffies_64()) {
+ if (time_before64(fi->i_time, get_jiffies_64())) {
r = true;
err = fuse_do_getattr(inode, stat, file);
} else {
@@ -1171,7 +1178,7 @@
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
struct fuse_inode *fi = get_fuse_inode(inode);
- if (fi->i_time < get_jiffies_64()) {
+ if (time_before64(fi->i_time, get_jiffies_64())) {
refreshed = true;
err = fuse_perm_getattr(inode, mask);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad..40ac262 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@
error = -EIO;
req->ff = fuse_write_file_get(fc, fi);
if (!req->ff)
- goto err_free;
+ goto err_nofile;
fuse_write_fill(req, req->ff, page_offset(page), 0);
@@ -1715,6 +1715,8 @@
return 0;
+err_nofile:
+ __free_page(tmp_page);
err_free:
fuse_request_free(req);
err:
@@ -1955,8 +1957,8 @@
data.ff = NULL;
err = -ENOMEM;
- data.orig_pages = kzalloc(sizeof(struct page *) *
- FUSE_MAX_PAGES_PER_REQ,
+ data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+ sizeof(struct page *),
GFP_NOFS);
if (!data.orig_pages)
goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf2..03246cd 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@
{OPT_ERR, NULL}
};
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+ int err = -ENOMEM;
+ char *buf = match_strdup(s);
+ if (buf) {
+ err = kstrtouint(buf, 10, res);
+ kfree(buf);
+ }
+ return err;
+}
+
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
{
char *p;
@@ -488,6 +499,7 @@
while ((p = strsep(&opt, ",")) != NULL) {
int token;
int value;
+ unsigned uv;
substring_t args[MAX_OPT_ARGS];
if (!*p)
continue;
@@ -511,18 +523,18 @@
break;
case OPT_USER_ID:
- if (match_int(&args[0], &value))
+ if (fuse_match_uint(&args[0], &uv))
return 0;
- d->user_id = make_kuid(current_user_ns(), value);
+ d->user_id = make_kuid(current_user_ns(), uv);
if (!uid_valid(d->user_id))
return 0;
d->user_id_present = 1;
break;
case OPT_GROUP_ID:
- if (match_int(&args[0], &value))
+ if (fuse_match_uint(&args[0], &uv))
return 0;
- d->group_id = make_kgid(current_user_ns(), value);
+ d->group_id = make_kgid(current_user_ns(), uv);
if (!gid_valid(d->group_id))
return 0;
d->group_id_present = 1;
@@ -895,9 +907,6 @@
fc->writeback_cache = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
- else
- fc->sb->s_time_gran = 1000000000;
-
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -926,7 +935,7 @@
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
- FUSE_WRITEBACK_CACHE;
+ FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@
sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
- if (!parse_fuse_opt((char *) data, &d, is_bdev))
+ if (!parse_fuse_opt(data, &d, is_bdev))
goto err;
if (is_bdev) {
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a30..26b3f95 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@
int error = 0;
state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+ flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
mutex_lock(&fp->f_fl_mutex);
@@ -991,7 +991,7 @@
goto out;
flock_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
- gfs2_glock_dq_wait(fl_gh);
+ gfs2_glock_dq(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
} else {
error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f73..ee4e04f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@
cachep = gfs2_glock_aspace_cachep;
else
cachep = gfs2_glock_cachep;
- gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+ gl = kmem_cache_alloc(cachep, GFP_NOFS);
if (!gl)
return -ENOMEM;
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
if (glops->go_flags & GLOF_LVB) {
- gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+ gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) {
kmem_cache_free(cachep, gl);
return -ENOMEM;
@@ -1404,12 +1404,16 @@
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
list_add(&gl->gl_lru, &lru_list);
atomic_inc(&lru_count);
continue;
}
+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+ spin_unlock(&gl->gl_spin);
+ goto add_back_to_lru;
+ }
clear_bit(GLF_LRU, &gl->gl_flags);
- spin_unlock(&lru_lock);
gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gl->gl_lockref.count--;
spin_unlock(&gl->gl_spin);
- spin_lock(&lru_lock);
+ cond_resched_lock(&lru_lock);
}
}
@@ -1442,7 +1446,7 @@
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
/* Test for being demotable */
- if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+ if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
freed++;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc11007..2ffc67d 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@
* inode_go_inval - prepare a inode glock to be released
* @gl: the glock
* @flags:
- *
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
* LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
* can keep hold of the metadata, since it won't have changed.
*
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274d..4fafea1 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@
new_size = old_size + RECOVER_SIZE_INC;
- submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
- result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+ submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+ result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
if (!submit || !result) {
kfree(submit);
kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1..f4cb9c0 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@
/**
* gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
* @len: Max length to check
*
* Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@
/**
* gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
*
*/
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f3..9eb787e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@
goto out;
}
path->dentry = dentry;
- path->mnt = mntget(nd->path.mnt);
+ path->mnt = nd->path.mnt;
if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
return 1;
+ mntget(path->mnt);
follow_mount(path);
error = 0;
out:
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138..f11b9ee 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
- bool do_destroy = true;
req = nfs_list_entry(hdr->pages.next);
nfs_list_remove_request(req);
@@ -765,7 +764,6 @@
case NFS_IOHDR_NEED_COMMIT:
kref_get(&req->wb_kref);
nfs_mark_request_commit(req, hdr->lseg, &cinfo);
- do_destroy = false;
}
nfs_unlock_and_release_request(req);
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf4..f415cbf 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
const struct rpc_call_ops *, int, int);
+void nfs_free_request(struct nfs_page *req);
static inline void nfs_iocounter_init(struct nfs_io_counter *c)
{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6ed..8f854dd 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@
&posix_acl_default_xattr_handler,
NULL,
};
+
+static int
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+ size_t size, ssize_t *result)
+{
+ struct posix_acl *acl;
+ char *p = data + *result;
+
+ acl = get_acl(inode, type);
+ if (!acl)
+ return 0;
+
+ posix_acl_release(acl);
+
+ *result += strlen(name);
+ *result += 1;
+ if (!size)
+ return 0;
+ if (*result > size)
+ return -ERANGE;
+
+ strcpy(p, name);
+ return 0;
+}
+
+ssize_t
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ ssize_t result = 0;
+ int error;
+
+ error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
+ POSIX_ACL_XATTR_ACCESS, data, size, &result);
+ if (error)
+ return error;
+
+ error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
+ POSIX_ACL_XATTR_DEFAULT, data, size, &result);
+ if (error)
+ return error;
+ return result;
+}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42..f0afa29 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@
.getattr = nfs_getattr,
.setattr = nfs_setattr,
#ifdef CONFIG_NFS_V3_ACL
- .listxattr = generic_listxattr,
+ .listxattr = nfs3_listxattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.removexattr = generic_removexattr,
@@ -899,7 +899,7 @@
.getattr = nfs_getattr,
.setattr = nfs_setattr,
#ifdef CONFIG_NFS_V3_ACL
- .listxattr = generic_listxattr,
+ .listxattr = nfs3_listxattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6..17fab89 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
-static void nfs_free_request(struct nfs_page *);
-
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
p->npages = pagecount;
@@ -239,20 +237,28 @@
WARN_ON_ONCE(prev == req);
if (!prev) {
+ /* a head request */
req->wb_head = req;
req->wb_this_page = req;
} else {
+ /* a subrequest */
WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
req->wb_head = prev->wb_head;
req->wb_this_page = prev->wb_this_page;
prev->wb_this_page = req;
+ /* All subrequests take a ref on the head request until
+ * nfs_page_group_destroy is called */
+ kref_get(&req->wb_head->wb_kref);
+
/* grab extra ref if head request has extra ref from
* the write/commit path to handle handoff between write
* and commit lists */
- if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+ if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
+ set_bit(PG_INODE_REF, &req->wb_flags);
kref_get(&req->wb_kref);
+ }
}
}
@@ -269,6 +275,10 @@
struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
struct nfs_page *tmp, *next;
+ /* subrequests must release the ref on the head request */
+ if (req->wb_head != req)
+ nfs_release_request(req->wb_head);
+
if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
return;
@@ -394,7 +404,7 @@
*
* Note: Should never be called with the spinlock held!
*/
-static void nfs_free_request(struct nfs_page *req)
+void nfs_free_request(struct nfs_page *req)
{
WARN_ON_ONCE(req->wb_this_page != req);
@@ -925,7 +935,6 @@
nfs_pageio_doio(desc);
if (desc->pg_error < 0)
return 0;
- desc->pg_moreio = 0;
if (desc->pg_recoalesce)
return 0;
/* retry add_request for this subreq */
@@ -972,6 +981,7 @@
desc->pg_count = 0;
desc->pg_base = 0;
desc->pg_recoalesce = 0;
+ desc->pg_moreio = 0;
while (!list_empty(&head)) {
struct nfs_page *req;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061..5e2f1030 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_clear_request_commit(struct nfs_page *req);
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@
set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
+/*
+ * nfs_page_find_head_request_locked - find head request associated with @page
+ *
+ * must be called while holding the inode lock.
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
static struct nfs_page *
-nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
+nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
struct nfs_page *req = NULL;
@@ -104,25 +112,33 @@
/* Linearly search the commit list for the correct req */
list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
if (freq->wb_page == page) {
- req = freq;
+ req = freq->wb_head;
break;
}
}
}
- if (req)
+ if (req) {
+ WARN_ON_ONCE(req->wb_head != req);
+
kref_get(&req->wb_kref);
+ }
return req;
}
-static struct nfs_page *nfs_page_find_request(struct page *page)
+/*
+ * nfs_page_find_head_request - find head request associated with @page
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
+static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req = NULL;
spin_lock(&inode->i_lock);
- req = nfs_page_find_request_locked(NFS_I(inode), page);
+ req = nfs_page_find_head_request_locked(NFS_I(inode), page);
spin_unlock(&inode->i_lock);
return req;
}
@@ -274,36 +290,246 @@
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
-static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
+
+/* nfs_page_group_clear_bits
+ * @req - an nfs request
+ * clears all page group related bits from @req
+ */
+static void
+nfs_page_group_clear_bits(struct nfs_page *req)
{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_page *req;
+ clear_bit(PG_TEARDOWN, &req->wb_flags);
+ clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
+ clear_bit(PG_UPTODATE, &req->wb_flags);
+ clear_bit(PG_WB_END, &req->wb_flags);
+ clear_bit(PG_REMOVE, &req->wb_flags);
+}
+
+
+/*
+ * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
+ *
+ * this is a helper function for nfs_lock_and_join_requests
+ *
+ * @inode - inode associated with request page group, must be holding inode lock
+ * @head - head request of page group, must be holding head lock
+ * @req - request that couldn't lock and needs to wait on the req bit lock
+ * @nonblock - if true, don't actually wait
+ *
+ * NOTE: this must be called holding page_group bit lock and inode spin lock
+ * and BOTH will be released before returning.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+static int
+nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
+ struct nfs_page *req, bool nonblock)
+ __releases(&inode->i_lock)
+{
+ struct nfs_page *tmp;
int ret;
- spin_lock(&inode->i_lock);
- for (;;) {
- req = nfs_page_find_request_locked(NFS_I(inode), page);
- if (req == NULL)
- break;
- if (nfs_lock_request(req))
- break;
- /* Note: If we hold the page lock, as is the case in nfs_writepage,
- * then the call to nfs_lock_request() will always
- * succeed provided that someone hasn't already marked the
- * request as dirty (in which case we don't care).
- */
- spin_unlock(&inode->i_lock);
- if (!nonblock)
- ret = nfs_wait_on_request(req);
- else
- ret = -EAGAIN;
- nfs_release_request(req);
- if (ret != 0)
- return ERR_PTR(ret);
- spin_lock(&inode->i_lock);
- }
+ /* relinquish all the locks successfully grabbed this run */
+ for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
+ nfs_unlock_request(tmp);
+
+ WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
+
+ /* grab a ref on the request that will be waited on */
+ kref_get(&req->wb_kref);
+
+ nfs_page_group_unlock(head);
spin_unlock(&inode->i_lock);
- return req;
+
+ /* release ref from nfs_page_find_head_request_locked */
+ nfs_release_request(head);
+
+ if (!nonblock)
+ ret = nfs_wait_on_request(req);
+ else
+ ret = -EAGAIN;
+ nfs_release_request(req);
+
+ return ret;
+}
+
+/*
+ * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
+ *
+ * @destroy_list - request list (using wb_this_page) terminated by @old_head
+ * @old_head - the old head of the list
+ *
+ * All subrequests must be locked and removed from all lists, so at this point
+ * they are only "active" in this function, and possibly in nfs_wait_on_request
+ * with a reference held by some other context.
+ */
+static void
+nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+ struct nfs_page *old_head)
+{
+ while (destroy_list) {
+ struct nfs_page *subreq = destroy_list;
+
+ destroy_list = (subreq->wb_this_page == old_head) ?
+ NULL : subreq->wb_this_page;
+
+ WARN_ON_ONCE(old_head != subreq->wb_head);
+
+ /* make sure old group is not used */
+ subreq->wb_head = subreq;
+ subreq->wb_this_page = subreq;
+
+ nfs_clear_request_commit(subreq);
+
+ /* subreq is now totally disconnected from page group or any
+ * write / commit lists. last chance to wake any waiters */
+ nfs_unlock_request(subreq);
+
+ if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+ /* release ref on old head request */
+ nfs_release_request(old_head);
+
+ nfs_page_group_clear_bits(subreq);
+
+ /* release the PG_INODE_REF reference */
+ if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
+ nfs_release_request(subreq);
+ else
+ WARN_ON_ONCE(1);
+ } else {
+ WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
+ /* zombie requests have already released the last
+ * reference and were waiting on the rest of the
+ * group to complete. Since it's no longer part of a
+ * group, simply free the request */
+ nfs_page_group_clear_bits(subreq);
+ nfs_free_request(subreq);
+ }
+ }
+}
+
+/*
+ * nfs_lock_and_join_requests - join all subreqs to the head req and return
+ * a locked reference, cancelling any pending
+ * operations for this page.
+ *
+ * @page - the page used to lookup the "page group" of nfs_page structures
+ * @nonblock - if true, don't block waiting for request locks
+ *
+ * This function joins all sub requests to the head request by first
+ * locking all requests in the group, cancelling any pending operations
+ * and finally updating the head request to cover the whole range covered by
+ * the (former) group. All subrequests are removed from any write or commit
+ * lists, unlinked from the group and destroyed.
+ *
+ * Returns a locked, referenced pointer to the head request - which after
+ * this call is guaranteed to be the only request associated with the page.
+ * Returns NULL if no requests are found for @page, or an ERR_PTR if an
+ * error was encountered.
+ */
+static struct nfs_page *
+nfs_lock_and_join_requests(struct page *page, bool nonblock)
+{
+ struct inode *inode = page_file_mapping(page)->host;
+ struct nfs_page *head, *subreq;
+ struct nfs_page *destroy_list = NULL;
+ unsigned int total_bytes;
+ int ret;
+
+try_again:
+ total_bytes = 0;
+
+ WARN_ON_ONCE(destroy_list);
+
+ spin_lock(&inode->i_lock);
+
+ /*
+ * A reference is taken only on the head request which acts as a
+ * reference to the whole page group - the group will not be destroyed
+ * until the head reference is released.
+ */
+ head = nfs_page_find_head_request_locked(NFS_I(inode), page);
+
+ if (!head) {
+ spin_unlock(&inode->i_lock);
+ return NULL;
+ }
+
+ /* lock each request in the page group */
+ nfs_page_group_lock(head);
+ subreq = head;
+ do {
+ /*
+ * Subrequests are always contiguous, non-overlapping
+ * and in order. If not, it's a programming error.
+ */
+ WARN_ON_ONCE(subreq->wb_offset !=
+ (head->wb_offset + total_bytes));
+
+ /* keep track of how many bytes this group covers */
+ total_bytes += subreq->wb_bytes;
+
+ if (!nfs_lock_request(subreq)) {
+ /* releases page group bit lock and
+ * inode spin lock and all references */
+ ret = nfs_unroll_locks_and_wait(inode, head,
+ subreq, nonblock);
+
+ if (ret == 0)
+ goto try_again;
+
+ return ERR_PTR(ret);
+ }
+
+ subreq = subreq->wb_this_page;
+ } while (subreq != head);
+
+ /* Now that all requests are locked, make sure they aren't on any list.
+ * Commit list removal accounting is done after locks are dropped */
+ subreq = head;
+ do {
+ nfs_list_remove_request(subreq);
+ subreq = subreq->wb_this_page;
+ } while (subreq != head);
+
+ /* unlink subrequests from head, destroy them later */
+ if (head->wb_this_page != head) {
+ /* destroy list will be terminated by head */
+ destroy_list = head->wb_this_page;
+ head->wb_this_page = head;
+
+ /* change head request to cover whole range that
+ * the former page group covered */
+ head->wb_bytes = total_bytes;
+ }
+
+ /*
+ * prepare head request to be added to new pgio descriptor
+ */
+ nfs_page_group_clear_bits(head);
+
+ /*
+ * some part of the group was still on the inode list - otherwise
+ * the group wouldn't be involved in async write.
+ * grab a reference for the head request, iff it needs one.
+ */
+ if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
+ kref_get(&head->wb_kref);
+
+ nfs_page_group_unlock(head);
+
+ /* drop lock to clear_request_commit the head req and clean up
+ * requests on destroy list */
+ spin_unlock(&inode->i_lock);
+
+ nfs_destroy_unlinked_subrequests(destroy_list, head);
+
+ /* clean up commit list state */
+ nfs_clear_request_commit(head);
+
+ /* still holds ref on head from nfs_page_find_head_request_locked
+ * and still has lock on head from lock loop */
+ return head;
}
/*
@@ -316,7 +542,7 @@
struct nfs_page *req;
int ret = 0;
- req = nfs_find_and_lock_request(page, nonblock);
+ req = nfs_lock_and_join_requests(page, nonblock);
if (!req)
goto out;
ret = PTR_ERR(req);
@@ -448,7 +674,9 @@
set_page_private(req->wb_page, (unsigned long)req);
}
nfsi->npages++;
- set_bit(PG_INODE_REF, &req->wb_flags);
+ /* this is a head request for a page group - mark it as having an
+ * extra reference so sub groups can follow suit */
+ WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
kref_get(&req->wb_kref);
spin_unlock(&inode->i_lock);
}
@@ -474,7 +702,9 @@
nfsi->npages--;
spin_unlock(&inode->i_lock);
}
- nfs_release_request(req);
+
+ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+ nfs_release_request(req);
}
static void
@@ -638,7 +868,6 @@
{
struct nfs_commit_info cinfo;
unsigned long bytes = 0;
- bool do_destroy;
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
goto out;
@@ -668,7 +897,6 @@
next:
nfs_unlock_request(req);
nfs_end_page_writeback(req);
- do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
nfs_release_request(req);
}
out:
@@ -769,7 +997,7 @@
spin_lock(&inode->i_lock);
for (;;) {
- req = nfs_page_find_request_locked(NFS_I(inode), page);
+ req = nfs_page_find_head_request_locked(NFS_I(inode), page);
if (req == NULL)
goto out_unlock;
@@ -877,7 +1105,7 @@
* dropped page.
*/
do {
- req = nfs_page_find_request(page);
+ req = nfs_page_find_head_request(page);
if (req == NULL)
return 0;
l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@
struct nfs_page *req;
int ret = 0;
- for (;;) {
- wait_on_page_writeback(page);
- req = nfs_page_find_request(page);
- if (req == NULL)
- break;
- if (nfs_lock_request(req)) {
- nfs_clear_request_commit(req);
- nfs_inode_remove_request(req);
- /*
- * In case nfs_inode_remove_request has marked the
- * page as being dirty
- */
- cancel_dirty_page(page, PAGE_CACHE_SIZE);
- nfs_unlock_and_release_request(req);
- break;
- }
- ret = nfs_wait_on_request(req);
- nfs_release_request(req);
- if (ret < 0)
- break;
+ wait_on_page_writeback(page);
+
+ /* blocking call to cancel all requests and join to a single (head)
+ * request */
+ req = nfs_lock_and_join_requests(page, false);
+
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ } else if (req) {
+ /* all requests from this page have been cancelled by
+ * nfs_lock_and_join_requests, so just remove the head
+ * request from the inode / page_private pointer and
+ * release it */
+ nfs_inode_remove_request(req);
+ /*
+ * In case nfs_inode_remove_request has marked the
+ * page as being dirty
+ */
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ nfs_unlock_and_release_request(req);
}
+
return ret;
}
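
The comments above describe how nfs_lock_and_join_requests() walks the wb_this_page ring and collapses it into the head request. As a rough illustration only, the following stand-alone C sketch models that ring walk with invented struct and field names (model_req, this_page, head, offset, bytes) patterned on the kernel's nfs_page; it is not NFS code.

/*
 * Minimal user-space model of the page-group join performed by
 * nfs_lock_and_join_requests(): walk the this_page ring, sum the byte
 * counts, then collapse the ring so only the head remains and covers
 * the whole range.  Purely illustrative.
 */
#include <assert.h>
#include <stdio.h>

struct model_req {
        struct model_req *this_page;    /* circular list, like wb_this_page */
        struct model_req *head;         /* like wb_head */
        unsigned int offset;            /* like wb_offset */
        unsigned int bytes;             /* like wb_bytes */
};

/* Join all subrequests into the head and return the total byte count. */
static unsigned int join_group(struct model_req *head)
{
        struct model_req *sub = head;
        unsigned int total = 0;

        do {
                /* subrequests must be contiguous and in order */
                assert(sub->offset == head->offset + total);
                total += sub->bytes;
                sub = sub->this_page;
        } while (sub != head);

        head->this_page = head;         /* collapse the ring */
        head->bytes = total;            /* head now covers the whole range */
        return total;
}

int main(void)
{
        struct model_req a = { .offset = 0,    .bytes = 1024 };
        struct model_req b = { .offset = 1024, .bytes = 2048 };
        struct model_req c = { .offset = 3072, .bytes = 1024 };

        a.head = b.head = c.head = &a;
        a.this_page = &b;
        b.this_page = &c;
        c.this_page = &a;               /* ring terminates back at the head */

        printf("joined range: %u bytes\n", join_group(&a));
        return 0;
}
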
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b56b1cc0..944275c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2879,6 +2879,7 @@
* return the conflicting open:
*/
if (conf->len) {
+ kfree(conf->data);
conf->len = 0;
conf->data = NULL;
goto again;
@@ -2891,6 +2892,7 @@
if (conf->len) {
p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
p = xdr_encode_opaque(p, conf->data, conf->len);
+ kfree(conf->data);
} else { /* non - nfsv4 lock in conflict, no clientid nor owner */
p = xdr_encode_hyper(p, (u64)0); /* clientid */
*p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@
nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
else if (nfserr == nfserr_denied)
nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
- kfree(lock->lk_denied.ld_owner.data);
+
return nfserr;
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9cd5f63..7f30bdc 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -702,6 +702,7 @@
struct dquot *dquot;
unsigned long freed = 0;
+ spin_lock(&dq_list_lock);
head = free_dquots.prev;
while (head != &free_dquots && sc->nr_to_scan) {
dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@
freed++;
head = free_dquots.prev;
}
+ spin_unlock(&dq_list_lock);
return freed;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff..c69e6d4 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@
/* wrap around? */
len = sizeof(*new_xattr) + size;
- if (len <= sizeof(*new_xattr))
+ if (len < sizeof(*new_xattr))
return NULL;
new_xattr = kmalloc(len, GFP_KERNEL);
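
The xattr hunk above relaxes the wrap-around test from <= to <, since size == 0 makes len exactly equal to the header size and is legal. A small stand-alone check of that arithmetic, with an assumed 64-byte header standing in for sizeof(*new_xattr):

/*
 * len is header + size, so an overflow makes len strictly smaller than
 * the header size, while size == 0 legitimately yields len == header
 * size and must not be rejected.
 */
#include <stdio.h>
#include <stddef.h>

#define HDR_SIZE ((size_t)64)   /* stand-in for sizeof(*new_xattr) */

static int alloc_would_wrap(size_t size)
{
        size_t len = HDR_SIZE + size;

        return len < HDR_SIZE;  /* the corrected check */
}

int main(void)
{
        printf("size 0        -> wrap? %d (allowed)\n", alloc_would_wrap(0));
        printf("size 4096     -> wrap? %d (allowed)\n", alloc_would_wrap(4096));
        printf("size SIZE_MAX -> wrap? %d (rejected)\n",
               alloc_would_wrap((size_t)-1));
        return 0;
}
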
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df..75c3fe5 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@
}
-int
-__xfs_bmapi_allocate(
+static int
+xfs_bmapi_allocate(
struct xfs_bmalloca *bma)
{
struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@
bma.flist = flist;
bma.firstblock = firstblock;
- if (flags & XFS_BMAPI_STACK_SWITCH)
- bma.stack_switch = 1;
-
while (bno < end && n < *nmap) {
inhole = eof || bma.got.br_startoff > bno;
wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e..b879ca5 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@
* from written to unwritten, otherwise convert from unwritten to written.
*/
#define XFS_BMAPI_CONVERT 0x040
-#define XFS_BMAPI_STACK_SWITCH 0x080
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
{ XFS_BMAPI_IGSTATE, "IGSTATE" }, \
{ XFS_BMAPI_CONTIG, "CONTIG" }, \
- { XFS_BMAPI_CONVERT, "CONVERT" }, \
- { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
+ { XFS_BMAPI_CONVERT, "CONVERT" }
static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec..64731ef 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@
}
/*
- * Stack switching interfaces for allocation
- */
-static void
-xfs_bmapi_allocate_worker(
- struct work_struct *work)
-{
- struct xfs_bmalloca *args = container_of(work,
- struct xfs_bmalloca, work);
- unsigned long pflags;
- unsigned long new_pflags = PF_FSTRANS;
-
- /*
- * we are in a transaction context here, but may also be doing work
- * in kswapd context, and hence we may need to inherit that state
- * temporarily to ensure that we don't block waiting for memory reclaim
- * in any way.
- */
- if (args->kswapd)
- new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
-
- current_set_flags_nested(&pflags, new_pflags);
-
- args->result = __xfs_bmapi_allocate(args);
- complete(args->done);
-
- current_restore_flags_nested(&pflags, new_pflags);
-}
-
-/*
- * Some allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Otherwise just
- * call directly to avoid the context switch overhead here.
- */
-int
-xfs_bmapi_allocate(
- struct xfs_bmalloca *args)
-{
- DECLARE_COMPLETION_ONSTACK(done);
-
- if (!args->stack_switch)
- return __xfs_bmapi_allocate(args);
-
-
- args->done = &done;
- args->kswapd = current_is_kswapd();
- INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
- queue_work(xfs_alloc_wq, &args->work);
- wait_for_completion(&done);
- destroy_work_on_stack(&args->work);
- return args->result;
-}
-
-/*
* Check if the endoff is outside the last extent. If so the caller will grow
* the allocation to a stripe unit boundary. All offsets are considered outside
* the end of file for an empty fork, so 1 is returned in *eof in that case.
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f722..2fdb72d 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@
bool userdata;/* set if is user data */
bool aeof; /* allocated space at eof */
bool conv; /* overwriting unwritten extents */
- bool stack_switch;
- bool kswapd; /* allocation in kswapd context */
int flags;
struct completion *done;
struct work_struct work;
@@ -66,8 +64,6 @@
int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
int *committed);
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
-int xfs_bmapi_allocate(struct xfs_bmalloca *args);
-int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6..cf893bc 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_alloc.h"
/*
* Cursor allocation zone.
@@ -2323,7 +2324,7 @@
* record (to be inserted into parent).
*/
STATIC int /* error */
-xfs_btree_split(
+__xfs_btree_split(
struct xfs_btree_cur *cur,
int level,
union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@
return error;
}
+struct xfs_btree_split_args {
+ struct xfs_btree_cur *cur;
+ int level;
+ union xfs_btree_ptr *ptrp;
+ union xfs_btree_key *key;
+ struct xfs_btree_cur **curp;
+ int *stat; /* success/failure */
+ int result;
+ bool kswapd; /* allocation in kswapd context */
+ struct completion *done;
+ struct work_struct work;
+};
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_btree_split_worker(
+ struct work_struct *work)
+{
+ struct xfs_btree_split_args *args = container_of(work,
+ struct xfs_btree_split_args, work);
+ unsigned long pflags;
+ unsigned long new_pflags = PF_FSTRANS;
+
+ /*
+ * we are in a transaction context here, but may also be doing work
+ * in kswapd context, and hence we may need to inherit that state
+ * temporarily to ensure that we don't block waiting for memory reclaim
+ * in any way.
+ */
+ if (args->kswapd)
+ new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+
+ current_set_flags_nested(&pflags, new_pflags);
+
+ args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
+ args->key, args->curp, args->stat);
+ complete(args->done);
+
+ current_restore_flags_nested(&pflags, new_pflags);
+}
+
+/*
+ * BMBT split requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. For the other
+ * btree types, just call directly to avoid the context switch overhead here.
+ */
+STATIC int /* error */
+xfs_btree_split(
+ struct xfs_btree_cur *cur,
+ int level,
+ union xfs_btree_ptr *ptrp,
+ union xfs_btree_key *key,
+ struct xfs_btree_cur **curp,
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_split_args args;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (cur->bc_btnum != XFS_BTNUM_BMAP)
+ return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+
+ args.cur = cur;
+ args.level = level;
+ args.ptrp = ptrp;
+ args.key = key;
+ args.curp = curp;
+ args.stat = stat;
+ args.done = &done;
+ args.kswapd = current_is_kswapd();
+ INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
+ queue_work(xfs_alloc_wq, &args.work);
+ wait_for_completion(&done);
+ destroy_work_on_stack(&args.work);
+ return args.result;
+}
+
+
/*
* Copy the old inode root contents into a real block and make the
* broot point to it.
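
xfs_btree_split() above defers BMBT splits to a workqueue purely to run on a fresh stack, then waits on a completion. A rough user-space analogue of that hand-off, using a pthread (with its own stack) plus a join instead of xfs_alloc_wq and wait_for_completion(); build with -pthread, and note that all names and the payload are made up.

/*
 * Sketch of the "run on a worker for a fresh stack" pattern: package
 * the arguments, hand them to a thread, wait for the result.
 */
#include <pthread.h>
#include <stdio.h>

struct split_args {
        int level;              /* inputs for the deferred work */
        int result;             /* filled in by the worker */
};

static void *split_worker(void *arg)
{
        struct split_args *args = arg;

        /* deep-recursion / stack-hungry work would run here */
        args->result = args->level * 2;
        return NULL;
}

static int do_split(int level)
{
        struct split_args args = { .level = level };
        pthread_t worker;

        /* the worker gets its own, full-sized stack */
        if (pthread_create(&worker, NULL, split_worker, &args))
                return -1;
        pthread_join(worker, NULL);     /* analogous to wait_for_completion() */
        return args.result;
}

int main(void)
{
        printf("split result: %d\n", do_split(3));
        return 0;
}
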
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c..6d3ec2b 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@
* pointer that the caller gave to us.
*/
error = xfs_bmapi_write(tp, ip, map_start_fsb,
- count_fsb,
- XFS_BMAPI_STACK_SWITCH,
+ count_fsb, 0,
&first_block, 1,
imap, &nimaps, &free_list);
if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b1..7703fa6 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@
}
/*
- * GQUOTINO and PQUOTINO cannot be used together in versions
- * of superblock that do not have pquotino. from->sb_flags
- * tells us which quota is active and should be copied to
- * disk.
+ * GQUOTINO and PQUOTINO cannot be used together in versions of
+ * superblock that do not have pquotino. from->sb_flags tells us which
+ * quota is active and should be copied to disk. If neither are active,
+ * make sure we write NULLFSINO to the sb_gquotino field as a quota
+ * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
+ * bit is set.
+ *
+ * Note that we don't need to handle the sb_uquotino or sb_pquotino here
+ * as they do not require any translation. Hence the main sb field loop
+ * will write them appropriately from the in-core superblock.
*/
if ((*fields & XFS_SB_GQUOTINO) &&
(from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@
else if ((*fields & XFS_SB_PQUOTINO) &&
(from->sb_qflags & XFS_PQUOTA_ACCT))
to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
+ else {
+ /*
+ * We can't rely on just the fields being logged to tell us
+ * that it is safe to write NULLFSINO - we should only do that
+ * if quotas are not actually enabled. Hence only write
+ * NULLFSINO if both in-core quota inodes are NULL.
+ */
+ if (from->sb_gquotino == NULLFSINO &&
+ from->sb_pquotino == NULLFSINO)
+ to->sb_gquotino = cpu_to_be64(NULLFSINO);
+ }
*fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
}
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index b571458..bcfd808 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -315,12 +315,19 @@
u8 notifier_present:1; /* Wake-up notify handler has been installed */
};
+struct acpi_device_wakeup_context {
+ struct work_struct work;
+ struct device *dev;
+};
+
struct acpi_device_wakeup {
acpi_handle gpe_device;
u64 gpe_number;
u64 sleep_state;
struct list_head resources;
struct acpi_device_wakeup_flags flags;
+ struct acpi_device_wakeup_context context;
+ struct wakeup_source *ws;
int prepare_count;
};
@@ -372,15 +379,9 @@
}
static inline void acpi_set_hp_context(struct acpi_device *adev,
- struct acpi_hotplug_context *hp,
- int (*notify)(struct acpi_device *, u32),
- void (*uevent)(struct acpi_device *, u32),
- void (*fixup)(struct acpi_device *))
+ struct acpi_hotplug_context *hp)
{
hp->self = adev;
- hp->notify = notify;
- hp->uevent = uevent;
- hp->fixup = fixup;
adev->hp = hp;
}
@@ -487,6 +488,8 @@
};
int register_acpi_bus_type(struct acpi_bus_type *);
int unregister_acpi_bus_type(struct acpi_bus_type *);
+int acpi_bind_one(struct device *dev, struct acpi_device *adev);
+int acpi_unbind_one(struct device *dev);
struct acpi_pci_root {
struct acpi_device * device;
@@ -510,20 +513,18 @@
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_PM
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler, void *context);
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler);
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
+ void (*work_func)(struct work_struct *work));
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
int acpi_pm_device_sleep_state(struct device *, int *, int);
#else
static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler,
- void *context)
+ struct device *dev,
+ void (*work_func)(struct work_struct *work))
{
return AE_SUPPORT;
}
-static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler)
+static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
return AE_SUPPORT;
}
@@ -538,13 +539,8 @@
#endif
#ifdef CONFIG_PM_RUNTIME
-int __acpi_device_run_wake(struct acpi_device *, bool);
int acpi_pm_device_run_wake(struct device *, bool);
#else
-static inline int __acpi_device_run_wake(struct acpi_device *adev, bool en)
-{
- return -ENODEV;
-}
static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
{
return -ENODEV;
@@ -552,14 +548,8 @@
#endif
#ifdef CONFIG_PM_SLEEP
-int __acpi_device_sleep_wake(struct acpi_device *, u32, bool);
int acpi_pm_device_sleep_wake(struct device *, bool);
#else
-static inline int __acpi_device_sleep_wake(struct acpi_device *adev,
- u32 target_state, bool enable)
-{
- return -ENODEV;
-}
static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
return -ENODEV;
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index f6f5f8a..03b3e6d 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -399,4 +399,35 @@
void acpi_os_close_directory(void *dir_handle);
#endif
+/*
+ * File I/O and related support
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_file
+ACPI_FILE acpi_os_open_file(const char *path, u8 modes);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_file
+void acpi_os_close_file(ACPI_FILE file);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_file
+int
+acpi_os_read_file(ACPI_FILE file,
+ void *buffer, acpi_size size, acpi_size count);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_file
+int
+acpi_os_write_file(ACPI_FILE file,
+ void *buffer, acpi_size size, acpi_size count);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_file_offset
+long acpi_os_get_file_offset(ACPI_FILE file);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_set_file_offset
+acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
+#endif
+
#endif /* __ACPIOSXF_H__ */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 35b525c..c3f38bc 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,15 +46,13 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20140424
+#define ACPI_CA_VERSION 0x20140627
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
#include <acpi/actbl.h>
#include <acpi/acbuffer.h>
-extern u8 acpi_gbl_permanent_mmap;
-
/*****************************************************************************
*
* Macros used for ACPICA globals and configuration
@@ -335,6 +333,23 @@
#endif /* ACPI_DEBUG_OUTPUT */
+/*
+ * Application prototypes
+ *
+ * All interfaces used by applications will be configured
+ * out of the ACPICA build unless the ACPI_APPLICATION
+ * flag is defined.
+ */
+#ifdef ACPI_APPLICATION
+#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \
+ prototype;
+
+#else
+#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \
+ static ACPI_INLINE prototype {return;}
+
+#endif /* ACPI_APPLICATION */
+
/*****************************************************************************
*
* ACPICA public interface prototypes
@@ -658,6 +673,10 @@
u32 gpe_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_mark_gpe_for_wake(acpi_handle gpe_device,
+ u32 gpe_number))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_setup_gpe_for_wake(acpi_handle
parent_device,
acpi_handle gpe_device,
@@ -861,21 +880,32 @@
const char *module_name,
u32 component_id,
const char *format, ...))
+ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
+ void ACPI_INTERNAL_VAR_XFACE
+ acpi_log_error(const char *format, ...))
/*
* Divergences
*/
-acpi_status acpi_get_id(acpi_handle object, acpi_owner_id * out_type);
+ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
-acpi_status acpi_unload_table_id(acpi_owner_id id);
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_get_id(acpi_handle object,
+ acpi_owner_id * out_type))
-acpi_status
-acpi_get_table_with_size(acpi_string signature,
- u32 instance, struct acpi_table_header **out_table,
- acpi_size *tbl_size);
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
-acpi_status
-acpi_get_data_full(acpi_handle object, acpi_object_handler handler, void **data,
- void (*callback)(void *));
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_get_table_with_size(acpi_string signature,
+ u32 instance,
+ struct acpi_table_header
+ **out_table,
+ acpi_size *tbl_size))
+
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_get_data_full(acpi_handle object,
+ acpi_object_handler handler,
+ void **data,
+ void (*callback)(void *)))
#endif /* __ACXFACE_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 4ad7da8..9613e8e 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -604,7 +604,7 @@
/* Generic Error Status block */
-struct acpi_generic_status {
+struct acpi_hest_generic_status {
u32 block_status;
u32 raw_data_offset;
u32 raw_data_length;
@@ -614,15 +614,15 @@
/* Values for block_status flags above */
-#define ACPI_GEN_ERR_UC BIT(0)
-#define ACPI_GEN_ERR_CE BIT(1)
-#define ACPI_GEN_ERR_MULTI_UC BIT(2)
-#define ACPI_GEN_ERR_MULTI_CE BIT(3)
-#define ACPI_GEN_ERR_COUNT_SHIFT (0xFF<<4) /* 8 bits, error count */
+#define ACPI_HEST_UNCORRECTABLE (1)
+#define ACPI_HEST_CORRECTABLE (1<<1)
+#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2)
+#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3)
+#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */
/* Generic Error Data entry */
-struct acpi_generic_data {
+struct acpi_hest_generic_data {
u8 section_type[16];
u32 error_severity;
u16 revision;
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 860e5c8..21314d3 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -516,7 +516,7 @@
struct acpi_dmar_header header;
u8 reserved[3];
u8 device_number;
- u8 object_name[];
+ char object_name[1];
};
/*******************************************************************************
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 19b26bb..608a040 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -126,6 +126,7 @@
typedef unsigned char u8;
typedef unsigned char u8;
typedef unsigned short u16;
+typedef short s16;
typedef COMPILER_DEPENDENT_UINT64 u64;
typedef COMPILER_DEPENDENT_INT64 s64;
@@ -1244,4 +1245,17 @@
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
+/* Definitions of file IO */
+
+#define ACPI_FILE_READING 0x01
+#define ACPI_FILE_WRITING 0x02
+#define ACPI_FILE_BINARY 0x04
+
+#define ACPI_FILE_BEGIN 0x01
+#define ACPI_FILE_END 0x02
+
+/* Definitions of getopt */
+
+#define ACPI_OPT_END -1
+
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index dfd60d0..720446c 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -14,7 +14,7 @@
struct ghes {
struct acpi_hest_generic *generic;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
u64 buffer_paddr;
unsigned long flags;
union {
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index e863dd5..5f8cc1f 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -87,20 +87,14 @@
#define ACPI_DBG_TRACK_ALLOCATIONS
#endif
-/* acpi_names configuration. Single threaded with debugger output enabled. */
-
-#ifdef ACPI_NAMES_APP
-#define ACPI_DEBUGGER
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
-#endif
-
/*
- * acpi_bin/acpi_dump/acpi_src/acpi_xtract/Example configuration. All single
- * threaded, with no debug output.
+ * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration.
+ * All single threaded.
*/
#if (defined ACPI_BIN_APP) || \
(defined ACPI_DUMP_APP) || \
+ (defined ACPI_HELP_APP) || \
+ (defined ACPI_NAMES_APP) || \
(defined ACPI_SRC_APP) || \
(defined ACPI_XTRACT_APP) || \
(defined ACPI_EXAMPLE_APP)
@@ -108,12 +102,40 @@
#define ACPI_SINGLE_THREADED
#endif
+/* acpi_help configuration. Error messages disabled. */
+
#ifdef ACPI_HELP_APP
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
#define ACPI_NO_ERROR_MESSAGES
#endif
+/* acpi_names configuration. Debug output enabled. */
+
+#ifdef ACPI_NAMES_APP
+#define ACPI_DEBUG_OUTPUT
+#endif
+
+/* acpi_exec/acpi_names/Example configuration. Native RSDP used. */
+
+#if (defined ACPI_EXEC_APP) || \
+ (defined ACPI_EXAMPLE_APP) || \
+ (defined ACPI_NAMES_APP)
+#define ACPI_USE_NATIVE_RSDP_POINTER
+#endif
+
+/* acpi_dump configuration. Native mapping used if provided by OSPMs */
+
+#ifdef ACPI_DUMP_APP
+#define ACPI_USE_NATIVE_MEMORY_MAPPING
+#define USE_NATIVE_ALLOCATE_ZEROED
+#endif
+
+/* acpi_names/Example configuration. Hardware disabled */
+
+#if (defined ACPI_EXAMPLE_APP) || \
+ (defined ACPI_NAMES_APP)
+#define ACPI_REDUCED_HARDWARE 1
+#endif
+
/* Linkable ACPICA library */
#ifdef ACPI_LIBRARY
@@ -185,6 +207,9 @@
#elif defined(_AED_EFI)
#include "acefi.h"
+#elif defined(_GNU_EFI)
+#include "acefi.h"
+
#elif defined(__HAIKU__)
#include "achaiku.h"
@@ -399,8 +424,12 @@
#ifdef ACPI_APPLICATION
#include <stdio.h>
#define ACPI_FILE FILE *
+#define ACPI_FILE_OUT stdout
+#define ACPI_FILE_ERR stderr
#else
#define ACPI_FILE void *
+#define ACPI_FILE_OUT NULL
+#define ACPI_FILE_ERR NULL
#endif /* ACPI_APPLICATION */
#endif /* ACPI_FILE */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index cd1f052..1ba7c19 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -44,6 +44,16 @@
#ifndef __ACLINUX_H__
#define __ACLINUX_H__
+#ifdef __KERNEL__
+
+/* ACPICA external files should not include ACPICA headers directly. */
+
+#if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H)
+#error "Please don't include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
+#endif
+
+#endif
+
/* Common (in-kernel/user-space) ACPICA configuration */
#define ACPI_USE_SYSTEM_CLIBRARY
@@ -70,7 +80,9 @@
#ifdef EXPORT_ACPI_INTERFACES
#include <linux/export.h>
#endif
+#ifdef CONFIG_ACPI
#include <asm/acenv.h>
+#endif
#ifndef CONFIG_ACPI
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 191e741..568d4b8 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -46,6 +46,28 @@
#ifdef __KERNEL__
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
+#ifndef ACPI_DIV_64_BY_32
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+ do { \
+ u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
+ (r32) = do_div ((__n), (d32)); \
+ (q32) = (u32) (__n); \
+ } while (0)
+#endif
+
+#ifndef ACPI_SHIFT_RIGHT_64
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+ do { \
+ (n_lo) >>= 1; \
+ (n_lo) |= (((n_hi) & 1) << 31); \
+ (n_hi) >>= 1; \
+ } while (0)
+#endif
+
+#endif
+
/*
* Overrides for in-kernel ACPICA
*/
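
ACPI_DIV_64_BY_32 above divides a 64-bit value, supplied as two 32-bit halves, by a 32-bit divisor and returns a 32-bit quotient and remainder via do_div(). The stand-alone snippet below only models that contract with plain C division; it is not the kernel macro.

#include <stdint.h>
#include <stdio.h>

static void div_64_by_32(uint32_t n_hi, uint32_t n_lo, uint32_t d32,
                         uint32_t *q32, uint32_t *r32)
{
        uint64_t n = ((uint64_t)n_hi << 32) | n_lo;

        *r32 = (uint32_t)(n % d32);     /* remainder of the full divide */
        *q32 = (uint32_t)(n / d32);     /* quotient truncated to 32 bits */
}

int main(void)
{
        uint32_t q, r;

        /* 0x1_0000_0003 / 10 = 429496729 remainder 9 */
        div_64_by_32(0x1, 0x3, 10, &q, &r);
        printf("q=%u r=%u\n", q, r);
        return 0;
}
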
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 358c01b..5320153 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -29,17 +29,17 @@
#include <linux/ioport.h> /* for struct resource */
#include <linux/device.h>
-#ifdef CONFIG_ACPI
-
#ifndef _LINUX
#define _LINUX
#endif
+#include <acpi/acpi.h>
+
+#ifdef CONFIG_ACPI
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/dynamic_debug.h>
-#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_numa.h>
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d..7d1955a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -176,6 +176,7 @@
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
+#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
struct freq_attr {
struct attribute attr;
@@ -482,8 +483,8 @@
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0
-#define CPUFREQ_TABLE_END ~1
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ (1 << 0)
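
The cpufreq hunk above turns the table sentinels into unsigned constants so they match the unsigned .frequency field. This small stand-alone program illustrates why a plain-int ~0 is a poor sentinel once wider unsigned types get involved; the macro names are local stand-ins, not the kernel's.

#include <stdio.h>

#define ENTRY_INVALID_SIGNED   ~0
#define ENTRY_INVALID_UNSIGNED ~0u

int main(void)
{
        unsigned int freq = ~0u;                /* like a .frequency sentinel */
        unsigned long long wide = freq;         /* widened copy */

        printf("freq == ~0   : %d\n", freq == ENTRY_INVALID_SIGNED);
        printf("freq == ~0u  : %d\n", freq == ENTRY_INVALID_UNSIGNED);
        /* sign extension makes ~0 equal 0xffffffffffffffff here, not freq */
        printf("wide == ~0   : %d\n", wide == ENTRY_INVALID_SIGNED);
        printf("wide == ~0u  : %d\n", wide == ENTRY_INVALID_UNSIGNED);
        return 0;
}
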
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a..92abb49 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@
struct device *dev;
void __iomem * const *iomap;
unsigned int n_ports;
+ unsigned int n_tags; /* nr of NCQ tags */
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bb..35b51e7 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@
u32 cons_index;
u16 irq;
- bool irq_affinity_change;
-
__be32 *set_ci_db;
__be32 *arm_db;
int arm_sn;
@@ -1167,6 +1165,8 @@
int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692de..42aa9b9 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
+#include <linux/osq_lock.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct optimistic_spin_queue;
struct mutex {
/* 1: unlocked, 0: locked, negative: locked, possible waiters */
atomic_t count;
@@ -56,7 +56,7 @@
struct task_struct *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- struct optimistic_spin_queue *osq; /* Spinner MCS lock */
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c949..d449018 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
- struct phy_device *phydev);
-
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
@@ -63,11 +60,6 @@
{
return NULL;
}
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
- struct phy_device *phydev)
-{
-}
#endif /* CONFIG_OF */
#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 0000000..90230d5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+ /*
+ * Stores an encoded value of the CPU # of the tail node in the queue.
+ * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+ */
+ atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+ atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
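
osq_lock.h above encodes the tail as "CPU number + 1" so that 0 can mean "queue empty". Below is a stand-alone model of that encoding using C11 atomics in place of atomic_t; the struct and helper names are invented for the demo and are not the kernel's.

#include <stdatomic.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0

struct osq_model {
        atomic_int tail;        /* encoded tail CPU, 0 == empty */
};

static int encode_cpu(int cpu) { return cpu + 1; }
static int decode_cpu(int val) { return val - 1; }

int main(void)
{
        struct osq_model lock = { .tail = OSQ_UNLOCKED_VAL };
        int old;

        printf("empty? %d\n", atomic_load(&lock.tail) == OSQ_UNLOCKED_VAL);

        /* CPU 0 queues itself: note it is stored as 1, not 0 */
        old = atomic_exchange(&lock.tail, encode_cpu(0));
        printf("previous tail: %s\n",
               old == OSQ_UNLOCKED_VAL ? "none" : "some CPU");
        printf("tail is CPU %d\n", decode_cpu(atomic_load(&lock.tail)));
        return 0;
}
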
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b58..e1474ae 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@
}
/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+ if (unlikely(PageHeadHuge(page)))
+ return page->index << compound_order(page);
+ else
+ return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
+/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
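
page_to_pgoff() above converts a page's ->index into PAGE_SIZE units, scaling hugetlb head pages by their compound order. The shift arithmetic, modelled stand-alone under assumed 4K base / 2M huge page sizes:

#include <stdio.h>

#define PAGE_SHIFT              12      /* 4K base pages (assumed) */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define HUGE_COMPOUND_ORDER     9       /* 2M huge pages (assumed) */

static unsigned long pgoff_of(unsigned long index, int huge)
{
        if (huge)
                return index << HUGE_COMPOUND_ORDER;
        return index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);        /* shift of 0 */
}

int main(void)
{
        /* third 2M huge page in a file == base-page offset 1024 */
        printf("huge index 2    -> pgoff %lu\n", pgoff_of(2, 1));
        /* ordinary page cache page: index is already in PAGE_SIZE units */
        printf("regular index 2 -> pgoff %lu\n", pgoff_of(2, 0));
        return 0;
}
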
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 637a608..64dacb7 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -11,12 +11,17 @@
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
-extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
- struct pci_bus *pci_bus);
-extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev);
+extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
+static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
+{
+ return acpi_remove_pm_notifier(dev);
+}
extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev);
-extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev);
+static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
+{
+ return acpi_remove_pm_notifier(dev);
+}
extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 43fd671..367f49b 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -24,11 +24,20 @@
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
#ifdef CONFIG_PM
+extern struct workqueue_struct *pm_wq;
+
+static inline bool queue_pm_work(struct work_struct *work)
+{
+ return queue_work(pm_wq, work);
+}
+
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);
#else
+static inline bool queue_pm_work(struct work_struct *work) { return false; }
+
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
@@ -37,8 +46,6 @@
#ifdef CONFIG_PM_RUNTIME
-extern struct workqueue_struct *pm_wq;
-
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19..6a94cc8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
-#include <linux/percpu.h>
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count. We might even clobber some other CPU's attempt
- * to zero its counter. This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
- return raw_cpu_inc_return(rcu_cond_resched_count) >=
- RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiscent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
- if (unlikely(rcu_should_resched()))
- rcu_resched();
-}
-
-/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
@@ -358,9 +322,19 @@
* initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc..561e861 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
#ifdef __KERNEL__
/*
* the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
* - if wait_list is not empty, then there are processes waiting for the semaphore
*/
struct rw_semaphore {
- __s32 activity;
+ __s32 count;
raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
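
The rwsem-spinlock rename above keeps the same semantics: count is 0 when idle, positive for active readers, -1 for a single writer. A toy, single-threaded model of just those state rules (no locking or waiting, purely illustrative):

#include <stdio.h>

static int count;       /* the renamed sem->count */

static int try_read_lock(void)
{
        if (count >= 0) {       /* no writer active */
                count++;
                return 1;
        }
        return 0;
}

static int try_write_lock(void)
{
        if (count == 0) {       /* completely idle */
                count = -1;
                return 1;
        }
        return 0;
}

int main(void)
{
        printf("reader 1: %d (count=%d)\n", try_read_lock(), count);
        printf("reader 2: %d (count=%d)\n", try_read_lock(), count);
        printf("writer  : %d (count=%d)\n", try_write_lock(), count); /* refused */
        count = 0;                                              /* readers gone */
        printf("writer  : %d (count=%d)\n", try_write_lock(), count);
        return 0;
}
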
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708..035d3c5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-
#include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
-struct optimistic_spin_queue;
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@
/* All arch specific implementations share the same struct */
struct rw_semaphore {
long count;
- raw_spinlock_t wait_lock;
struct list_head wait_list;
-#ifdef CONFIG_SMP
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
* Write owner. Used as a speculative check to see
* if the owner is running on the cpu.
*/
struct task_struct *owner;
- struct optimistic_spin_queue *osq; /* spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -64,22 +65,19 @@
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, \
- __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- LIST_HEAD_INIT((name).wait_list), \
- NULL, /* owner */ \
- NULL /* mcs lock */ \
- __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
#else
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, \
- __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
#endif
+#define __RWSEM_INITIALIZER(name) \
+ { .count = RWSEM_UNLOCKED_VALUE, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+ __RWSEM_OPT_INIT(name) \
+ __RWSEM_DEP_MAP_INIT(name) }
+
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
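
__RWSEM_INITIALIZER above switches to designated initializers so the optional __RWSEM_OPT_INIT trailer is appended only when CONFIG_RWSEM_SPIN_ON_OWNER is set. A minimal stand-alone imitation of that macro layering, with an invented struct and option flag:

#include <stdio.h>

#define HAVE_OPT 1              /* stand-in for CONFIG_RWSEM_SPIN_ON_OWNER */

struct demo_sem {
        long count;
        int wait_lock;
#if HAVE_OPT
        int osq;
        void *owner;
#endif
};

#if HAVE_OPT
#define DEMO_OPT_INIT , .osq = 0, .owner = NULL
#else
#define DEMO_OPT_INIT
#endif

#define DEMO_INITIALIZER \
        { .count = 0, .wait_lock = 0 DEMO_OPT_INIT }

static struct demo_sem sem = DEMO_INITIALIZER;

int main(void)
{
        printf("count=%ld wait_lock=%d\n", sem.count, sem.wait_lock);
        return 0;
}
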
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0..0376b05 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@
#define SD_NUMA 0x4000 /* cross-node balancing */
#ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
{
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
{
return SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
{
return SD_NUMA;
}
@@ -999,7 +999,7 @@
bool cpus_share_cache(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP 0x01
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
index 4723bbf..a6e555c 100644
--- a/include/linux/sfi_acpi.h
+++ b/include/linux/sfi_acpi.h
@@ -63,8 +63,6 @@
#include <linux/sfi.h>
#ifdef CONFIG_SFI
-#include <acpi/acpi.h> /* FIXME: inclusion should be removed */
-
extern int sfi_acpi_table_parse(char *signature, char *oem_id,
char *oem_table_id,
int (*handler)(struct acpi_table_header *));
@@ -78,7 +76,6 @@
return sfi_acpi_table_parse(signature, NULL, NULL, handler);
}
#else /* !CONFIG_SFI */
-
static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
char *oem_table_id,
int (*handler)(struct acpi_table_header *))
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7277caf..47f4254 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -203,7 +203,6 @@
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
- /* HACK. gc_* should follow parms without a gap! */
int gc_interval;
int gc_thresh1;
int gc_thresh2;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b8..c4d8619 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
#include <net/netlink.h>
#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@
};
struct nft_stats {
- u64 bytes;
- u64 pkts;
+ u64 bytes;
+ u64 pkts;
+ struct u64_stats_sync syncp;
};
#define NFT_HOOK_OPS_MAX 2
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index 079030c..e207096 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,7 @@
struct netns_ieee802154_lowpan {
struct netns_sysctl_lowpan sysctl;
struct netns_frags frags;
- u16 max_dsize;
+ int max_dsize;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394c..eee608b 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@
struct nft_af_info *inet;
struct nft_af_info *arp;
struct nft_af_info *bridge;
+ unsigned int base_seq;
u8 gencursor;
- u8 genctr;
};
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 173cae4..1563507 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1768,9 +1768,11 @@
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- spin_lock(&sk->sk_dst_lock);
- __sk_dst_set(sk, dst);
- spin_unlock(&sk->sk_dst_lock);
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
}
static inline void
@@ -1782,9 +1784,7 @@
static inline void
sk_dst_reset(struct sock *sk)
{
- spin_lock(&sk->sk_dst_lock);
- __sk_dst_reset(sk);
- spin_unlock(&sk->sk_dst_lock);
+ sk_dst_set(sk, NULL);
}
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
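
sk_dst_set() above replaces the spinlocked update with a bare xchg() of the cached dst pointer followed by a release of the old entry, which also lets sk_dst_reset() become sk_dst_set(sk, NULL). A user-space model of that publish-and-release pattern, with C11 atomic_exchange and free() standing in for xchg() and dst_release() and an illustrative struct:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dst { int id; };

static _Atomic(struct dst *) dst_cache;

static void dst_set(struct dst *new_dst)
{
        struct dst *old = atomic_exchange(&dst_cache, new_dst);

        free(old);      /* drop the reference on the old entry */
}

int main(void)
{
        struct dst *a = malloc(sizeof(*a));
        struct dst *b = malloc(sizeof(*b));

        a->id = 1;
        b->id = 2;
        dst_set(a);
        dst_set(b);             /* releases a */
        printf("current dst id: %d\n", atomic_load(&dst_cache)->id);
        dst_set(NULL);          /* releases b; the dst_reset() analogue */
        return 0;
}
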
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8..25084a0 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
* - add FATTR_CTIME
* - add ctime and ctimensec to fuse_setattr_in
* - add FUSE_RENAME2 request
+ * - add FUSE_NO_OPEN_SUPPORT flag
*/
#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@
* FUSE_READDIRPLUS_AUTO: adaptive readdirplus
* FUSE_ASYNC_DIO: asynchronous direct I/O submission
* FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@
#define FUSE_READDIRPLUS_AUTO (1 << 14)
#define FUSE_ASYNC_DIO (1 << 15)
#define FUSE_WRITEBACK_CACHE (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT (1 << 17)
/**
* CUSE INIT request/reply flags
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9..76768ee 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@
endif
+config ARCH_SUPPORTS_ATOMIC_RMW
+ bool
+
config MUTEX_SPIN_ON_OWNER
def_bool y
- depends on SMP && !DEBUG_MUTEXES
+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+
+config RWSEM_SPIN_ON_OWNER
+ def_bool y
+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_USE_QUEUE_RWLOCK
bool
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a33d9a2b..6b17ac1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2320,7 +2320,7 @@
next_parent = rcu_dereference(next_ctx->parent_ctx);
/* If neither context have a parent context; they cannot be clones. */
- if (!parent && !next_parent)
+ if (!parent || !next_parent)
goto unlock;
if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -7458,7 +7458,19 @@
struct perf_event_context *child_ctx,
struct task_struct *child)
{
- perf_remove_from_context(child_event, true);
+ /*
+ * Do not destroy the 'original' grouping; because of the context
+ * switch optimization the original events could've ended up in a
+ * random child task.
+ *
+ * If we were to destroy the original group, all group related
+ * operations would cease to function properly after this random
+ * child dies.
+ *
+ * Do destroy all inherited groups; we don't care about those,
+ * and being thorough is better.
+ */
+ perf_remove_from_context(child_event, !!child_event->parent);
/*
* It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event, *next;
- struct perf_event_context *child_ctx;
+ struct perf_event_context *child_ctx, *parent_ctx;
unsigned long flags;
if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@
raw_spin_lock(&child_ctx->lock);
task_ctx_sched_out(child_ctx);
child->perf_event_ctxp[ctxn] = NULL;
+
+ /*
+ * In order to avoid freeing: child_ctx->parent_ctx->task
+ * under perf_event_context::lock, grab another reference.
+ */
+ parent_ctx = child_ctx->parent_ctx;
+ if (parent_ctx)
+ get_ctx(parent_ctx);
+
/*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
+ * Now that we no longer hold perf_event_context::lock, drop
+ * our extra child_ctx->parent_ctx reference.
+ */
+ if (parent_ctx)
+ put_ctx(parent_ctx);
+
+ /*
* Report the task dead after unscheduling the events so that we
* won't get any samples after PERF_RECORD_EXIT. We can however still
* get a few PERF_RECORD_READ events.
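
The perf hunks above pin child_ctx->parent_ctx with an extra get_ctx() so the final put (and possible free) happens only after perf_event_context::lock is dropped. A stand-alone sketch of that take-a-reference-before-unlocking pattern, with an invented ctx refcount and a pthread mutex modelling the kernel lock and get_ctx()/put_ctx(); build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
        int refcount;
        int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void get_ctx(struct ctx *c) { c->refcount++; }

static void put_ctx(struct ctx *c)
{
        if (--c->refcount == 0) {
                printf("freeing ctx %d (lock not held)\n", c->id);
                free(c);
        }
}

int main(void)
{
        struct ctx *parent = malloc(sizeof(*parent));

        parent->refcount = 1;
        parent->id = 42;

        pthread_mutex_lock(&lock);
        get_ctx(parent);                /* keep it alive past the unlock */
        parent->refcount--;             /* the other reference goes away here */
        pthread_mutex_unlock(&lock);

        put_ctx(parent);                /* drops to zero outside the lock */
        return 0;
}
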
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289..734e9a7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@
{
unsigned long *iter;
struct kprobe_blacklist_entry *ent;
- unsigned long offset = 0, size = 0;
+ unsigned long entry, offset = 0, size = 0;
for (iter = start; iter < end; iter++) {
- if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
- pr_err("Failed to find blacklist %p\n", (void *)*iter);
+ entry = arch_deref_entry_point((void *)*iter);
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+ pr_err("Failed to find blacklist at %p\n",
+ (void *)entry);
continue;
}
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
return -ENOMEM;
- ent->start_addr = *iter;
- ent->end_addr = *iter + size;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
INIT_LIST_HEAD(&ent->list);
list_add_tail(&ent->list, &kprobe_blacklist);
}
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e..be9ee15 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
* called from interrupt context and we have preemption disabled while
* spinning.
*/
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+ return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+ int cpu_nr = encoded_cpu_val - 1;
+
+ return per_cpu_ptr(&osq_node, cpu_nr);
+}
/*
* Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
* Can return NULL in case we were the last queued and we updated @lock instead.
*/
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
- struct optimistic_spin_queue *node,
- struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+ struct optimistic_spin_node *node,
+ struct optimistic_spin_node *prev)
{
- struct optimistic_spin_queue *next = NULL;
+ struct optimistic_spin_node *next = NULL;
+ int curr = encode_cpu(smp_processor_id());
+ int old;
+
+ /*
+ * If there is a prev node in the queue, then the 'old' value will be
+ * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since, if
+ * we're currently last in the queue, the queue will become empty.
+ */
+ old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
for (;;) {
- if (*lock == node && cmpxchg(lock, node, prev) == node) {
+ if (atomic_read(&lock->tail) == curr &&
+ atomic_cmpxchg(&lock->tail, curr, old) == curr) {
/*
* We were the last queued, we moved @lock back. @prev
* will now observe @lock and will complete its
@@ -59,18 +85,23 @@
return next;
}
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
{
- struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
- struct optimistic_spin_queue *prev, *next;
+ struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+ struct optimistic_spin_node *prev, *next;
+ int curr = encode_cpu(smp_processor_id());
+ int old;
node->locked = 0;
node->next = NULL;
+ node->cpu = curr;
- node->prev = prev = xchg(lock, node);
- if (likely(prev == NULL))
+ old = atomic_xchg(&lock->tail, curr);
+ if (old == OSQ_UNLOCKED_VAL)
return true;
+ prev = decode_cpu(old);
+ node->prev = prev;
ACCESS_ONCE(prev->next) = node;
/*
@@ -149,20 +180,21 @@
return false;
}
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
{
- struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
- struct optimistic_spin_queue *next;
+ struct optimistic_spin_node *node, *next;
+ int curr = encode_cpu(smp_processor_id());
/*
* Fast path for the uncontended case.
*/
- if (likely(cmpxchg(lock, node, NULL) == node))
+ if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
return;
/*
* Second most likely case.
*/
+ node = this_cpu_ptr(&osq_node);
next = xchg(&node->next, NULL);
if (next) {
ACCESS_ONCE(next->locked) = 1;
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4..74356dc 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@
* mutex_lock()/rwsem_down_{read,write}() etc.
*/
-struct optimistic_spin_queue {
- struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+ struct optimistic_spin_node *next, *prev;
int locked; /* 1 if lock acquired */
+ int cpu; /* encoded CPU # value */
};
-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
#endif /* __LINUX_MCS_SPINLOCK_H */
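
With struct optimistic_spin_queue reduced to an atomic tail, the queue is now identified by an encoded CPU number rather than a node pointer, with 0 reserved for "unlocked". A standalone sketch of that convention, using a plain array in place of per-CPU data (illustration only):

#include <assert.h>

#define OSQ_UNLOCKED_VAL 0          /* tail value meaning "no CPU queued" */
#define NR_CPUS 64                  /* illustration only */

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked;
	int cpu;                    /* encoded CPU number, i.e. cpu_nr + 1 */
};

static struct optimistic_spin_node osq_node[NR_CPUS];   /* stand-in for the per-CPU data */

static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;          /* shift by one so 0 can mean "unlocked" */
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	return &osq_node[encoded_cpu_val - 1];
}

int main(void)
{
	assert(decode_cpu(encode_cpu(5)) == &osq_node[5]);
	assert(encode_cpu(0) != OSQ_UNLOCKED_VAL);   /* CPU 0 stays distinguishable from "empty" */
	return 0;
}

The real code performs the same arithmetic against per_cpu_ptr(&osq_node, cpu_nr).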
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33..acca2c1 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@
INIT_LIST_HEAD(&lock->wait_list);
mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- lock->osq = NULL;
+ osq_lock_init(&lock->osq);
#endif
debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a91..2c93571 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@
unsigned long flags;
if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
- ret = (sem->activity != 0);
+ ret = (sem->count != 0);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
@@ -46,7 +46,7 @@
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
- sem->activity = 0;
+ sem->count = 0;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
@@ -95,7 +95,7 @@
waiter = list_entry(next, struct rwsem_waiter, list);
} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
- sem->activity += woken;
+ sem->count += woken;
out:
return sem;
@@ -126,9 +126,9 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ if (sem->count >= 0 && list_empty(&sem->wait_list)) {
/* granted */
- sem->activity++;
+ sem->count++;
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -170,9 +170,9 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ if (sem->count >= 0 && list_empty(&sem->wait_list)) {
/* granted */
- sem->activity++;
+ sem->count++;
ret = 1;
}
@@ -206,7 +206,7 @@
* itself to sleep, waiting for the system to wake it or for someone
* else at the head of the wait list to wake it up.
*/
- if (sem->activity == 0)
+ if (sem->count == 0)
break;
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
}
/* got the lock */
- sem->activity = -1;
+ sem->count = -1;
list_del(&waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->activity == 0) {
+ if (sem->count == 0) {
/* got the lock */
- sem->activity = -1;
+ sem->count = -1;
ret = 1;
}
@@ -255,7 +255,7 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+ if (--sem->count == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- sem->activity = 0;
+ sem->count = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
@@ -287,7 +287,7 @@
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- sem->activity = 1;
+ sem->count = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc321..a2391ac 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@
sem->count = RWSEM_UNLOCKED_VALUE;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
sem->owner = NULL;
- sem->osq = NULL;
+ osq_lock_init(&sem->osq);
#endif
}
@@ -262,7 +262,7 @@
return false;
}
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* Try to acquire write lock before the writer has been put on wait queue.
*/
@@ -285,10 +285,10 @@
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner;
- bool on_cpu = true;
+ bool on_cpu = false;
if (need_resched())
- return 0;
+ return false;
rcu_read_lock();
owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@
rcu_read_unlock();
/*
- * If sem->owner is not set, the rwsem owner may have
- * just acquired it and not set the owner yet or the rwsem
- * has been released.
+ * If sem->owner is not set, yet we have just recently entered the
+	 * slowpath, then there is a possibility that readers may hold the lock.
+ * To be safe, avoid spinning in these situations.
*/
return on_cpu;
}
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806d..e2d3bc7 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
#include <linux/atomic.h>
-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
sem->owner = current;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9a83d78..e4e4121 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -253,9 +253,6 @@
anything, try disabling/enabling this option (or disabling/enabling
APM in your BIOS).
-config ARCH_HAS_OPP
- bool
-
config PM_OPP
bool
---help---
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 8e90f33..9a59d04 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -296,8 +296,8 @@
suspend_state_t i;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
- if (pm_states[i].state)
- s += sprintf(s,"%s ", pm_states[i].label);
+ if (pm_states[i])
+ s += sprintf(s,"%s ", pm_states[i]);
#endif
if (hibernation_available())
@@ -311,8 +311,7 @@
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
- suspend_state_t state = PM_SUSPEND_MIN;
- struct pm_sleep_state *s;
+ suspend_state_t state;
#endif
char *p;
int len;
@@ -325,10 +324,12 @@
return PM_SUSPEND_MAX;
#ifdef CONFIG_SUSPEND
- for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
- if (s->state && len == strlen(s->label)
- && !strncmp(buf, s->label, len))
- return s->state;
+ for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
+ const char *label = pm_states[state];
+
+ if (label && len == strlen(label) && !strncmp(buf, label, len))
+ return state;
+ }
#endif
return PM_SUSPEND_ON;
@@ -446,8 +447,8 @@
#ifdef CONFIG_SUSPEND
if (state < PM_SUSPEND_MAX)
- return sprintf(buf, "%s\n", pm_states[state].state ?
- pm_states[state].label : "error");
+ return sprintf(buf, "%s\n", pm_states[state] ?
+ pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
return sprintf(buf, "disk\n");
@@ -615,7 +616,6 @@
.attrs = g,
};
-#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
@@ -625,9 +625,6 @@
return pm_wq ? 0 : -ENOMEM;
}
-#else
-static inline int pm_start_workqueue(void) { return 0; }
-#endif
static int __init pm_init(void)
{
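
The reworked decode_state() above is now a plain scan of the flat pm_states[] label table. A hedged userspace sketch of the same lookup (state values and labels are illustrative):

#include <stdio.h>
#include <string.h>

enum { PM_SUSPEND_ON, PM_SUSPEND_FREEZE, PM_SUSPEND_STANDBY, PM_SUSPEND_MEM, PM_SUSPEND_MAX };
#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE

/* NULL means the state is not valid on this system. */
static const char *pm_states[PM_SUSPEND_MAX] = {
	[PM_SUSPEND_FREEZE]  = "freeze",
	[PM_SUSPEND_STANDBY] = "standby",
	[PM_SUSPEND_MEM]     = "mem",
};

static int decode_state(const char *buf, size_t len)
{
	for (int state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
	return PM_SUSPEND_ON;       /* unrecognized string */
}

int main(void)
{
	printf("%d %d\n", decode_state("mem", 3), decode_state("bogus", 5));
	return 0;
}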
diff --git a/kernel/power/power.h b/kernel/power/power.h
index c60f13b..5d49dca 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -178,13 +178,8 @@
unsigned int, char *);
#ifdef CONFIG_SUSPEND
-struct pm_sleep_state {
- const char *label;
- suspend_state_t state;
-};
-
/* kernel/power/suspend.c */
-extern struct pm_sleep_state pm_states[];
+extern const char *pm_states[];
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0ca8d83..4ee194e 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -186,6 +186,7 @@
printk("Restarting tasks ... ");
+ __usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
read_lock(&tasklist_lock);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1ea328a..4fc5c32 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -248,33 +248,61 @@
* information is stored (in the form of a block of bitmap)
* It also contains the pfns that correspond to the start and end of
* the represented memory area.
+ *
+ * The memory bitmap is organized as a radix tree to guarantee fast random
+ * access to the bits. There is one radix tree for each zone (as returned
+ * from create_mem_extents).
+ *
+ * One radix tree is represented by one struct mem_zone_bm_rtree. There are
+ * two linked lists for the nodes of the tree, one for the inner nodes and
+ * one for the leaf nodes. The linked leaf nodes are used for fast linear
+ * access to the memory bitmap.
+ *
+ * The struct rtree_node represents one node of the radix tree.
*/
#define BM_END_OF_MAP (~0UL)
#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
+#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
+#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
-struct bm_block {
- struct list_head hook; /* hook into a list of bitmap blocks */
- unsigned long start_pfn; /* pfn represented by the first bit */
- unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
- unsigned long *data; /* bitmap representing pages */
+/*
+ * struct rtree_node is a wrapper struct to link the nodes
+ * of the rtree together for easy linear iteration over
+ * bits and easy freeing
+ */
+struct rtree_node {
+ struct list_head list;
+ unsigned long *data;
};
-static inline unsigned long bm_block_bits(struct bm_block *bb)
-{
- return bb->end_pfn - bb->start_pfn;
-}
+/*
+ * struct mem_zone_bm_rtree represents a bitmap used for one
+ * populated memory zone.
+ */
+struct mem_zone_bm_rtree {
+ struct list_head list; /* Link Zones together */
+ struct list_head nodes; /* Radix Tree inner nodes */
+ struct list_head leaves; /* Radix Tree leaves */
+ unsigned long start_pfn; /* Zone start page frame */
+ unsigned long end_pfn; /* Zone end page frame + 1 */
+ struct rtree_node *rtree; /* Radix Tree Root */
+ int levels; /* Number of Radix Tree Levels */
+ unsigned int blocks; /* Number of Bitmap Blocks */
+};
/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
- struct bm_block *block;
- int bit;
+ struct mem_zone_bm_rtree *zone;
+ struct rtree_node *node;
+ unsigned long node_pfn;
+ int node_bit;
};
struct memory_bitmap {
- struct list_head blocks; /* list of bitmap blocks */
+ struct list_head zones;
struct linked_page *p_list; /* list of pages used to store zone
* bitmap objects and bitmap block
* objects
@@ -284,38 +312,178 @@
/* Functions that operate on memory bitmaps */
-static void memory_bm_position_reset(struct memory_bitmap *bm)
+#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
+#if BITS_PER_LONG == 32
+#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
+#else
+#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
+#endif
+#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
+
+/*
+ * alloc_rtree_node - Allocate a new node and add it to the radix tree.
+ *
+ * This function is used to allocate inner nodes as well as the
+ * leaf nodes of the radix tree. It also adds the node to the
+ * corresponding linked list passed in by the *list parameter.
+ */
+static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
+ struct chain_allocator *ca,
+ struct list_head *list)
{
- bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
- bm->cur.bit = 0;
+ struct rtree_node *node;
+
+ node = chain_alloc(ca, sizeof(struct rtree_node));
+ if (!node)
+ return NULL;
+
+ node->data = get_image_page(gfp_mask, safe_needed);
+ if (!node->data)
+ return NULL;
+
+ list_add_tail(&node->list, list);
+
+ return node;
}
-static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
-
-/**
- * create_bm_block_list - create a list of block bitmap objects
- * @pages - number of pages to track
- * @list - list to put the allocated blocks into
- * @ca - chain allocator to be used for allocating memory
+/*
+ * add_rtree_block - Add a new leaf node to the radix tree
+ *
+ * The leaf nodes need to be allocated in ascending block order to keep
+ * the leaves list sorted. This is guaranteed by the zone->blocks
+ * counter.
*/
-static int create_bm_block_list(unsigned long pages,
- struct list_head *list,
- struct chain_allocator *ca)
+static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
+ int safe_needed, struct chain_allocator *ca)
{
- unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+ struct rtree_node *node, *block, **dst;
+ unsigned int levels_needed, block_nr;
+ int i;
- while (nr_blocks-- > 0) {
- struct bm_block *bb;
+ block_nr = zone->blocks;
+ levels_needed = 0;
- bb = chain_alloc(ca, sizeof(struct bm_block));
- if (!bb)
- return -ENOMEM;
- list_add(&bb->hook, list);
+ /* How many levels do we need for this block nr? */
+ while (block_nr) {
+ levels_needed += 1;
+ block_nr >>= BM_RTREE_LEVEL_SHIFT;
}
+ /* Make sure the rtree has enough levels */
+ for (i = zone->levels; i < levels_needed; i++) {
+ node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+ &zone->nodes);
+ if (!node)
+ return -ENOMEM;
+
+ node->data[0] = (unsigned long)zone->rtree;
+ zone->rtree = node;
+ zone->levels += 1;
+ }
+
+ /* Allocate new block */
+ block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
+ if (!block)
+ return -ENOMEM;
+
+ /* Now walk the rtree to insert the block */
+ node = zone->rtree;
+ dst = &zone->rtree;
+ block_nr = zone->blocks;
+ for (i = zone->levels; i > 0; i--) {
+ int index;
+
+ if (!node) {
+ node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+ &zone->nodes);
+ if (!node)
+ return -ENOMEM;
+ *dst = node;
+ }
+
+ index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+ index &= BM_RTREE_LEVEL_MASK;
+ dst = (struct rtree_node **)&((*dst)->data[index]);
+ node = *dst;
+ }
+
+ zone->blocks += 1;
+ *dst = block;
+
return 0;
}
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+ int clear_nosave_free);
+
+/*
+ * create_zone_bm_rtree - create a radix tree for one zone
+ *
+ * Allocates the mem_zone_bm_rtree structure and initializes it.
+ * This function also allocates and builds the radix tree for the
+ * zone.
+ */
+static struct mem_zone_bm_rtree *
+create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
+ struct chain_allocator *ca,
+ unsigned long start, unsigned long end)
+{
+ struct mem_zone_bm_rtree *zone;
+ unsigned int i, nr_blocks;
+ unsigned long pages;
+
+ pages = end - start;
+ zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
+ if (!zone)
+ return NULL;
+
+ INIT_LIST_HEAD(&zone->nodes);
+ INIT_LIST_HEAD(&zone->leaves);
+ zone->start_pfn = start;
+ zone->end_pfn = end;
+ nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+
+ for (i = 0; i < nr_blocks; i++) {
+ if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
+ free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
+ return NULL;
+ }
+ }
+
+ return zone;
+}
+
+/*
+ * free_zone_bm_rtree - Free the memory of the radix tree
+ *
+ * Free all node pages of the radix tree. The mem_zone_bm_rtree
+ * structure itself is not freed here nor are the rtree_node
+ * structs.
+ */
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+ int clear_nosave_free)
+{
+ struct rtree_node *node;
+
+ list_for_each_entry(node, &zone->nodes, list)
+ free_image_page(node->data, clear_nosave_free);
+
+ list_for_each_entry(node, &zone->leaves, list)
+ free_image_page(node->data, clear_nosave_free);
+}
+
+static void memory_bm_position_reset(struct memory_bitmap *bm)
+{
+ bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
+ list);
+ bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+ struct rtree_node, list);
+ bm->cur.node_pfn = 0;
+ bm->cur.node_bit = 0;
+}
+
+static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+
struct mem_extent {
struct list_head hook;
unsigned long start;
@@ -407,40 +575,22 @@
int error;
chain_init(&ca, gfp_mask, safe_needed);
- INIT_LIST_HEAD(&bm->blocks);
+ INIT_LIST_HEAD(&bm->zones);
error = create_mem_extents(&mem_extents, gfp_mask);
if (error)
return error;
list_for_each_entry(ext, &mem_extents, hook) {
- struct bm_block *bb;
- unsigned long pfn = ext->start;
- unsigned long pages = ext->end - ext->start;
+ struct mem_zone_bm_rtree *zone;
- bb = list_entry(bm->blocks.prev, struct bm_block, hook);
-
- error = create_bm_block_list(pages, bm->blocks.prev, &ca);
- if (error)
+ zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
+ ext->start, ext->end);
+ if (!zone) {
+ error = -ENOMEM;
goto Error;
-
- list_for_each_entry_continue(bb, &bm->blocks, hook) {
- bb->data = get_image_page(gfp_mask, safe_needed);
- if (!bb->data) {
- error = -ENOMEM;
- goto Error;
- }
-
- bb->start_pfn = pfn;
- if (pages >= BM_BITS_PER_BLOCK) {
- pfn += BM_BITS_PER_BLOCK;
- pages -= BM_BITS_PER_BLOCK;
- } else {
- /* This is executed only once in the loop */
- pfn += pages;
- }
- bb->end_pfn = pfn;
}
+ list_add_tail(&zone->list, &bm->zones);
}
bm->p_list = ca.chain;
@@ -460,51 +610,83 @@
*/
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
- struct bm_block *bb;
+ struct mem_zone_bm_rtree *zone;
- list_for_each_entry(bb, &bm->blocks, hook)
- if (bb->data)
- free_image_page(bb->data, clear_nosave_free);
+ list_for_each_entry(zone, &bm->zones, list)
+ free_zone_bm_rtree(zone, clear_nosave_free);
free_list_of_pages(bm->p_list, clear_nosave_free);
- INIT_LIST_HEAD(&bm->blocks);
+ INIT_LIST_HEAD(&bm->zones);
}
/**
- * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
- * to given pfn. The cur_zone_bm member of @bm and the cur_block member
- * of @bm->cur_zone_bm are updated.
+ * memory_bm_find_bit - Find the bit for pfn in the memory
+ * bitmap
+ *
+ * Find the bit in the bitmap @bm that corresponds to the given pfn.
+ * The cur.zone, cur.node and cur.node_pfn members of @bm are
+ * updated.
+ * It walks the radix tree to find the page which contains the bit for
+ * pfn and returns the page address in *addr and the bit number in *bit_nr.
*/
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
- void **addr, unsigned int *bit_nr)
+ void **addr, unsigned int *bit_nr)
{
- struct bm_block *bb;
+ struct mem_zone_bm_rtree *curr, *zone;
+ struct rtree_node *node;
+ int i, block_nr;
- /*
- * Check if the pfn corresponds to the current bitmap block and find
- * the block where it fits if this is not the case.
- */
- bb = bm->cur.block;
- if (pfn < bb->start_pfn)
- list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
- if (pfn >= bb->start_pfn)
- break;
+ zone = bm->cur.zone;
- if (pfn >= bb->end_pfn)
- list_for_each_entry_continue(bb, &bm->blocks, hook)
- if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
- break;
+ if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
+ goto zone_found;
- if (&bb->hook == &bm->blocks)
+ zone = NULL;
+
+ /* Find the right zone */
+ list_for_each_entry(curr, &bm->zones, list) {
+ if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
+ zone = curr;
+ break;
+ }
+ }
+
+ if (!zone)
return -EFAULT;
- /* The block has been found */
- bm->cur.block = bb;
- pfn -= bb->start_pfn;
- bm->cur.bit = pfn + 1;
- *bit_nr = pfn;
- *addr = bb->data;
+zone_found:
+ /*
+	 * We have a zone. Now walk the radix tree to find the leaf
+ * node for our pfn.
+ */
+
+ node = bm->cur.node;
+ if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+ goto node_found;
+
+ node = zone->rtree;
+ block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
+
+ for (i = zone->levels; i > 0; i--) {
+ int index;
+
+ index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+ index &= BM_RTREE_LEVEL_MASK;
+ BUG_ON(node->data[index] == 0);
+ node = (struct rtree_node *)node->data[index];
+ }
+
+node_found:
+ /* Update last position */
+ bm->cur.zone = zone;
+ bm->cur.node = node;
+ bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
+
+ /* Set return values */
+ *addr = node->data;
+ *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
+
return 0;
}
@@ -528,6 +710,7 @@
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
if (!error)
set_bit(bit, addr);
+
return error;
}
@@ -542,6 +725,14 @@
clear_bit(bit, addr);
}
+static void memory_bm_clear_current(struct memory_bitmap *bm)
+{
+ int bit;
+
+ bit = max(bm->cur.node_bit - 1, 0);
+ clear_bit(bit, bm->cur.node->data);
+}
+
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
@@ -561,38 +752,70 @@
return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
-/**
- * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
- * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
- * returned.
+/*
+ * rtree_next_node - Jumps to the next leaf node
*
- * It is required to run memory_bm_position_reset() before the first call to
- * this function.
+ * Sets the position to the beginning of the next node in the
+ * memory bitmap. This is either the next node in the current
+ * zone's radix tree or the first node in the radix tree of the
+ * next zone.
+ *
+ * Returns true if there is a next node, false otherwise.
*/
+static bool rtree_next_node(struct memory_bitmap *bm)
+{
+ bm->cur.node = list_entry(bm->cur.node->list.next,
+ struct rtree_node, list);
+ if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+ bm->cur.node_pfn += BM_BITS_PER_BLOCK;
+ bm->cur.node_bit = 0;
+ touch_softlockup_watchdog();
+ return true;
+ }
+ /* No more nodes, goto next zone */
+ bm->cur.zone = list_entry(bm->cur.zone->list.next,
+ struct mem_zone_bm_rtree, list);
+ if (&bm->cur.zone->list != &bm->zones) {
+ bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+ struct rtree_node, list);
+ bm->cur.node_pfn = 0;
+ bm->cur.node_bit = 0;
+ return true;
+ }
+
+ /* No more zones */
+ return false;
+}
+
+/**
+ * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
+ *
+ * Starting from the last returned position this function searches
+ * for the next set bit in the memory bitmap and returns its
+ * number. If no more bits are set, BM_END_OF_MAP is returned.
+ *
+ * It is required to run memory_bm_position_reset() before the
+ * first call to this function.
+ */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
- struct bm_block *bb;
+ unsigned long bits, pfn, pages;
int bit;
- bb = bm->cur.block;
do {
- bit = bm->cur.bit;
- bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
- if (bit < bm_block_bits(bb))
- goto Return_pfn;
+ pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
+ bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
+ bit = find_next_bit(bm->cur.node->data, bits,
+ bm->cur.node_bit);
+ if (bit < bits) {
+ pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
+ bm->cur.node_bit = bit + 1;
+ return pfn;
+ }
+ } while (rtree_next_node(bm));
- bb = list_entry(bb->hook.next, struct bm_block, hook);
- bm->cur.block = bb;
- bm->cur.bit = 0;
- } while (&bb->hook != &bm->blocks);
-
- memory_bm_position_reset(bm);
return BM_END_OF_MAP;
-
- Return_pfn:
- bm->cur.bit = bit + 1;
- return bb->start_pfn + bit;
}
/**
@@ -816,12 +1039,17 @@
unsigned int snapshot_additional_pages(struct zone *zone)
{
- unsigned int res;
+ unsigned int rtree, nodes;
- res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
- res += DIV_ROUND_UP(res * sizeof(struct bm_block),
- LINKED_PAGE_DATA_SIZE);
- return 2 * res;
+ rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
+ rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
+ LINKED_PAGE_DATA_SIZE);
+ while (nodes > 1) {
+ nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
+ rtree += nodes;
+ }
+
+ return 2 * rtree;
}
#ifdef CONFIG_HIGHMEM
@@ -1094,23 +1322,35 @@
void swsusp_free(void)
{
- struct zone *zone;
- unsigned long pfn, max_zone_pfn;
+ unsigned long fb_pfn, fr_pfn;
- for_each_populated_zone(zone) {
- max_zone_pfn = zone_end_pfn(zone);
- for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
- if (pfn_valid(pfn)) {
- struct page *page = pfn_to_page(pfn);
+ memory_bm_position_reset(forbidden_pages_map);
+ memory_bm_position_reset(free_pages_map);
- if (swsusp_page_is_forbidden(page) &&
- swsusp_page_is_free(page)) {
- swsusp_unset_page_forbidden(page);
- swsusp_unset_page_free(page);
- __free_page(page);
- }
- }
+loop:
+ fr_pfn = memory_bm_next_pfn(free_pages_map);
+ fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+
+ /*
+ * Find the next bit set in both bitmaps. This is guaranteed to
+ * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
+ */
+ do {
+ if (fb_pfn < fr_pfn)
+ fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+ if (fr_pfn < fb_pfn)
+ fr_pfn = memory_bm_next_pfn(free_pages_map);
+ } while (fb_pfn != fr_pfn);
+
+ if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
+ struct page *page = pfn_to_page(fr_pfn);
+
+ memory_bm_clear_current(forbidden_pages_map);
+ memory_bm_clear_current(free_pages_map);
+ __free_page(page);
+ goto loop;
}
+
nr_copy_pages = 0;
nr_meta_pages = 0;
restore_pblist = NULL;
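
The new bitmap layout turns a pfn lookup into pure index arithmetic: the low BM_BLOCK_SHIFT bits select a bit inside a leaf page, and the remaining bits are consumed BM_RTREE_LEVEL_SHIFT at a time while walking down the tree. A small sketch of that decomposition (constants hard-coded for 4 KiB pages and 64-bit longs, illustration only):

#include <stdio.h>

#define PAGE_SHIFT            12                      /* 4 KiB pages, illustration only */
#define BM_BLOCK_SHIFT        (PAGE_SHIFT + 3)        /* bits tracked per leaf page */
#define BM_BLOCK_MASK         ((1UL << BM_BLOCK_SHIFT) - 1)
#define BM_RTREE_LEVEL_SHIFT  (PAGE_SHIFT - 3)        /* 64-bit: log2(pointers per inner node) */
#define BM_RTREE_LEVEL_MASK   ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/* Show how a memory_bm_find_bit()-style walk decomposes a pfn for a zone with @levels levels. */
static void decompose(unsigned long pfn, unsigned long start_pfn, int levels)
{
	unsigned long block_nr = (pfn - start_pfn) >> BM_BLOCK_SHIFT;
	unsigned long bit      = (pfn - start_pfn) & BM_BLOCK_MASK;

	printf("pfn %lu -> leaf block %lu, bit %lu, per-level indices:", pfn, block_nr, bit);
	for (int i = levels; i > 0; i--)
		printf(" %lu", (block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT)) &
			       BM_RTREE_LEVEL_MASK);
	printf("\n");
}

int main(void)
{
	decompose(123456789UL, 0, 2);
	return 0;
}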
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4dd8822..9a071be 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,20 +31,11 @@
#include "power.h"
-struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
- [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
- [PM_SUSPEND_STANDBY] = { .label = "standby", },
- [PM_SUSPEND_MEM] = { .label = "mem", },
-};
+static const char *pm_labels[] = { "mem", "standby", "freeze", };
+const char *pm_states[PM_SUSPEND_MAX];
static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
-
-static bool need_suspend_ops(suspend_state_t state)
-{
- return state > PM_SUSPEND_FREEZE;
-}
-
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;
@@ -97,10 +88,7 @@
static int __init sleep_states_setup(char *str)
{
relative_states = !strncmp(str, "1", 1);
- if (relative_states) {
- pm_states[PM_SUSPEND_MEM].state = PM_SUSPEND_FREEZE;
- pm_states[PM_SUSPEND_FREEZE].state = 0;
- }
+ pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
return 1;
}
@@ -113,20 +101,20 @@
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
suspend_state_t i;
- int j = PM_SUSPEND_MAX - 1;
+ int j = 0;
lock_system_sleep();
suspend_ops = ops;
for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
- if (valid_state(i))
- pm_states[j--].state = i;
- else if (!relative_states)
- pm_states[j--].state = 0;
+ if (valid_state(i)) {
+ pm_states[i] = pm_labels[j++];
+ } else if (!relative_states) {
+ pm_states[i] = NULL;
+ j++;
+ }
- pm_states[j--].state = PM_SUSPEND_FREEZE;
- while (j >= PM_SUSPEND_MIN)
- pm_states[j--].state = 0;
+ pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
unlock_system_sleep();
}
@@ -145,6 +133,65 @@
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
+static bool sleep_state_supported(suspend_state_t state)
+{
+ return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
+}
+
+static int platform_suspend_prepare(suspend_state_t state)
+{
+ return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
+ suspend_ops->prepare() : 0;
+}
+
+static int platform_suspend_prepare_late(suspend_state_t state)
+{
+ return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
+ suspend_ops->prepare_late() : 0;
+}
+
+static void platform_suspend_wake(suspend_state_t state)
+{
+ if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
+ suspend_ops->wake();
+}
+
+static void platform_suspend_finish(suspend_state_t state)
+{
+ if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
+ suspend_ops->finish();
+}
+
+static int platform_suspend_begin(suspend_state_t state)
+{
+ if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
+ return freeze_ops->begin();
+ else if (suspend_ops->begin)
+ return suspend_ops->begin(state);
+ else
+ return 0;
+}
+
+static void platform_suspend_end(suspend_state_t state)
+{
+ if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
+ freeze_ops->end();
+ else if (suspend_ops->end)
+ suspend_ops->end();
+}
+
+static void platform_suspend_recover(suspend_state_t state)
+{
+ if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
+ suspend_ops->recover();
+}
+
+static bool platform_suspend_again(suspend_state_t state)
+{
+ return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
+ suspend_ops->suspend_again() : false;
+}
+
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
@@ -168,7 +215,7 @@
{
int error;
- if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
+ if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
@@ -214,23 +261,18 @@
{
int error;
- if (need_suspend_ops(state) && suspend_ops->prepare) {
- error = suspend_ops->prepare();
- if (error)
- goto Platform_finish;
- }
+ error = platform_suspend_prepare(state);
+ if (error)
+ goto Platform_finish;
error = dpm_suspend_end(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Platform_finish;
}
-
- if (need_suspend_ops(state) && suspend_ops->prepare_late) {
- error = suspend_ops->prepare_late();
- if (error)
- goto Platform_wake;
- }
+ error = platform_suspend_prepare_late(state);
+ if (error)
+ goto Platform_wake;
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
@@ -278,15 +320,11 @@
ftrace_start();
Platform_wake:
- if (need_suspend_ops(state) && suspend_ops->wake)
- suspend_ops->wake();
-
+ platform_suspend_wake(state);
dpm_resume_start(PMSG_RESUME);
Platform_finish:
- if (need_suspend_ops(state) && suspend_ops->finish)
- suspend_ops->finish();
-
+ platform_suspend_finish(state);
return error;
}
@@ -299,18 +337,13 @@
int error;
bool wakeup = false;
- if (need_suspend_ops(state) && !suspend_ops)
+ if (!sleep_state_supported(state))
return -ENOSYS;
- if (need_suspend_ops(state) && suspend_ops->begin) {
- error = suspend_ops->begin(state);
- if (error)
- goto Close;
- } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
- error = freeze_ops->begin();
- if (error)
- goto Close;
- }
+ error = platform_suspend_begin(state);
+ if (error)
+ goto Close;
+
suspend_console();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
@@ -324,25 +357,20 @@
do {
error = suspend_enter(state, &wakeup);
- } while (!error && !wakeup && need_suspend_ops(state)
- && suspend_ops->suspend_again && suspend_ops->suspend_again());
+ } while (!error && !wakeup && platform_suspend_again(state));
Resume_devices:
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
resume_console();
- Close:
- if (need_suspend_ops(state) && suspend_ops->end)
- suspend_ops->end();
- else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
- freeze_ops->end();
+ Close:
+ platform_suspend_end(state);
return error;
Recover_platform:
- if (need_suspend_ops(state) && suspend_ops->recover)
- suspend_ops->recover();
+ platform_suspend_recover(state);
goto Resume_devices;
}
@@ -395,7 +423,7 @@
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
- pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
+ pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare(state);
if (error)
goto Unlock;
@@ -404,7 +432,7 @@
goto Finish;
trace_suspend_resume(TPS("suspend_enter"), state, false);
- pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
+ pr_debug("PM: Entering %s sleep\n", pm_states[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
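
The repeated need_suspend_ops()/NULL-hook checks are folded into one wrapper per platform callback, so suspend_devices_and_enter() reads as a straight sequence. A sketch of that dispatch pattern under simplified stand-in types (not the real suspend_ops definition):

#include <stdio.h>

typedef int suspend_state_t;
enum { PM_SUSPEND_FREEZE = 1, PM_SUSPEND_MEM = 3 };

struct platform_suspend_ops {
	int (*prepare)(void);       /* optional hook */
};

static const struct platform_suspend_ops *suspend_ops;

/*
 * Every "does this state use the platform hook?" test is funneled through
 * one helper, so callers never open-code the FREEZE special case.
 */
static int platform_suspend_prepare(suspend_state_t state)
{
	return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
		suspend_ops->prepare() : 0;
}

static int fake_prepare(void) { puts("platform prepare"); return 0; }
static const struct platform_suspend_ops fake_ops = { .prepare = fake_prepare };

int main(void)
{
	suspend_ops = &fake_ops;
	platform_suspend_prepare(PM_SUSPEND_MEM);     /* runs the hook */
	platform_suspend_prepare(PM_SUSPEND_FREEZE);  /* skipped for freeze */
	return 0;
}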
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 269b097..2f524928 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -92,13 +92,13 @@
}
if (state == PM_SUSPEND_MEM) {
- printk(info_test, pm_states[state].label);
+ printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status == -ENODEV)
state = PM_SUSPEND_STANDBY;
}
if (state == PM_SUSPEND_STANDBY) {
- printk(info_test, pm_states[state].label);
+ printk(info_test, pm_states[state]);
status = pm_suspend(state);
}
if (status < 0)
@@ -141,8 +141,8 @@
/* "=mem" ==> "mem" */
value++;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
- if (!strcmp(pm_states[i].label, value)) {
- test_state = pm_states[i].state;
+ if (!strcmp(pm_states[i], value)) {
+ test_state = i;
return 0;
}
@@ -162,8 +162,8 @@
/* PM is initialized by now; is that state testable? */
if (test_state == PM_SUSPEND_ON)
goto done;
- if (!pm_states[test_state].state) {
- printk(warn_bad_state, pm_states[test_state].label);
+ if (!pm_states[test_state]) {
+ printk(warn_bad_state, pm_states[test_state]);
goto done;
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba773..625d0b0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@
rdp->passed_quiesce = 1;
}
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+ .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+ .dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+ .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+ .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+};
+
+/*
+ * Let the RCU core know that this CPU has gone through the scheduler,
+ * which is a quiescent state. This is called when the need for a
+ * quiescent state is urgent, so we burn an atomic operation and full
+ * memory barriers to let the RCU core know about it, regardless of what
+ * this CPU might (or might not) do in the near future.
+ *
+ * We inform the RCU core by emulating a zero-duration dyntick-idle
+ * period, which we in turn do by incrementing the ->dynticks counter
+ * by two.
+ */
+static void rcu_momentary_dyntick_idle(void)
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+ struct rcu_dynticks *rdtp;
+ int resched_mask;
+ struct rcu_state *rsp;
+
+ local_irq_save(flags);
+
+ /*
+ * Yes, we can lose flag-setting operations. This is OK, because
+ * the flag will be set again after some delay.
+ */
+ resched_mask = raw_cpu_read(rcu_sched_qs_mask);
+ raw_cpu_write(rcu_sched_qs_mask, 0);
+
+ /* Find the flavor that needs a quiescent state. */
+ for_each_rcu_flavor(rsp) {
+ rdp = raw_cpu_ptr(rsp->rda);
+ if (!(resched_mask & rsp->flavor_mask))
+ continue;
+ smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
+ if (ACCESS_ONCE(rdp->mynode->completed) !=
+ ACCESS_ONCE(rdp->cond_resched_completed))
+ continue;
+
+ /*
+ * Pretend to be momentarily idle for the quiescent state.
+ * This allows the grace-period kthread to record the
+ * quiescent state, with no need for this CPU to do anything
+ * further.
+ */
+ rdtp = this_cpu_ptr(&rcu_dynticks);
+ smp_mb__before_atomic(); /* Earlier stuff before QS. */
+ atomic_add(2, &rdtp->dynticks); /* QS. */
+ smp_mb__after_atomic(); /* Later stuff after QS. */
+ break;
+ }
+ local_irq_restore(flags);
+}
+
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
+ if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ rcu_momentary_dyntick_idle();
trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
- .dynticks = ATOMIC_INIT(1),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
- .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
- .dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-};
-
static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000; /* If this many pending, ignore blimit. */
static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@
module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = HZ / 20;
+module_param(jiffies_till_sched_qs, ulong, 0644);
+
static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@
bool *isidle, unsigned long *maxj)
{
unsigned int curr;
+ int *rcrmp;
unsigned int snap;
curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@
}
/*
- * There is a possibility that a CPU in adaptive-ticks state
- * might run in the kernel with the scheduling-clock tick disabled
- * for an extended time period. Invoke rcu_kick_nohz_cpu() to
- * force the CPU to restart the scheduling-clock tick in this
- * CPU is in this state.
+ * A CPU running for an extended time within the kernel can
+ * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
+ * even context-switching back and forth between a pair of
+ * in-kernel CPU-bound tasks cannot advance grace periods.
+ * So if the grace period is old enough, make the CPU pay attention.
+ * Note that the unsynchronized assignments to the per-CPU
+ * rcu_sched_qs_mask variable are safe. Yes, setting of
+ * bits can be lost, but they will be set again on the next
+ * force-quiescent-state pass. So lost bit sets do not result
+ * in incorrect behavior, merely in a grace period lasting
+ * a few jiffies longer than it might otherwise. Because
+ * there are at most four threads involved, and because the
+ * updates are only once every few jiffies, the probability of
+ * lossage (and thus of slight grace-period extension) is
+ * quite low.
+ *
+ * Note that if the jiffies_till_sched_qs boot/sysfs parameter
+ * is set too high, we override with half of the RCU CPU stall
+ * warning delay.
*/
- rcu_kick_nohz_cpu(rdp->cpu);
-
- /*
- * Alternatively, the CPU might be running in the kernel
- * for an extended period of time without a quiescent state.
- * Attempt to force the CPU through the scheduler to gain the
- * needed quiescent state, but only if the grace period has gone
- * on for an uncommonly long time. If there are many stuck CPUs,
- * we will beat on the first one until it gets unstuck, then move
- * to the next. Only do this for the primary flavor of RCU.
- */
- if (rdp->rsp == rcu_state_p &&
+ rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+ if (ULONG_CMP_GE(jiffies,
+ rdp->rsp->gp_start + jiffies_till_sched_qs) ||
ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
- rdp->rsp->jiffies_resched += 5;
- resched_cpu(rdp->cpu);
+ if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+ ACCESS_ONCE(rdp->cond_resched_completed) =
+ ACCESS_ONCE(rdp->mynode->completed);
+ smp_mb(); /* ->cond_resched_completed before *rcrmp. */
+ ACCESS_ONCE(*rcrmp) =
+ ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+ resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
+ rdp->rsp->jiffies_resched += 5; /* Enable beating. */
+ } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+ /* Time to beat on that CPU again! */
+ resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
+ rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+ }
}
return 0;
@@ -3491,6 +3572,7 @@
"rcu_node_fqs_1",
"rcu_node_fqs_2",
"rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
+ static u8 fl_mask = 0x1;
int cpustride = 1;
int i;
int j;
@@ -3509,6 +3591,8 @@
for (i = 1; i < rcu_num_lvls; i++)
rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
rcu_init_levelspread(rsp);
+ rsp->flavor_mask = fl_mask;
+ fl_mask <<= 1;
/* Initialize the elements themselves, starting from the leaves. */
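
The rcu_momentary_dyntick_idle() added earlier in this tree.c hunk reports a quiescent state by bumping ->dynticks by two. A hedged model of the counter convention the surrounding comments suggest (not the kernel's actual implementation): the counter is even while the CPU is idle and odd while it is not, and a watcher treats "even, or advanced by at least two since my snapshot" as proof of a quiescent state, so adding 2 signals a QS without changing the "currently non-idle" parity.

#include <assert.h>

static int quiescent_since(unsigned int snap, unsigned int curr)
{
	/* even => idle right now; +2 since the snapshot => passed through a QS */
	return (curr & 0x1) == 0 || (curr - snap) >= 2;
}

int main(void)
{
	unsigned int dynticks = 1;          /* odd: CPU busy in the kernel */
	unsigned int snap = dynticks;       /* watcher takes a snapshot */

	assert(!quiescent_since(snap, dynticks));   /* nothing has happened yet */
	dynticks += 2;                              /* rcu_momentary_dyntick_idle() */
	assert(quiescent_since(snap, dynticks));    /* watcher now sees a QS */
	return 0;
}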
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e6..0f69a79 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -307,6 +307,9 @@
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
unsigned long offline_fqs; /* Kicked due to being offline. */
+ unsigned long cond_resched_completed;
+ /* Grace period that needs help */
+ /* from cond_resched(). */
/* 5) __rcu_pending() statistics. */
unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@
struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
+ u8 flavor_mask; /* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void rcu_kick_nohz_cpu(int cpu);
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45..02ac0fb 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2404,7 +2404,7 @@
* if an adaptive-ticks CPU is failing to respond to the current grace
* period and has not been idle from an RCU perspective, kick it.
*/
-static void rcu_kick_nohz_cpu(int cpu)
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
if (tick_nohz_full_cpu(cpu))
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4d..bc78835 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,12 +200,12 @@
EXPORT_SYMBOL_GPL(wait_rcu_gp);
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-static inline void debug_init_rcu_head(struct rcu_head *head)
+void init_rcu_head(struct rcu_head *head)
{
debug_object_init(head, &rcuhead_debug_descr);
}
-static inline void debug_rcu_head_free(struct rcu_head *head)
+void destroy_rcu_head(struct rcu_head *head)
{
debug_object_free(head, &rcuhead_debug_descr);
}
@@ -350,21 +350,3 @@
early_initcall(check_cpu_stall_init);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
-
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-DEFINE_PER_CPU(int, rcu_cond_resched_count);
-
-/*
- * Report a set of RCU quiescent states, for use by cond_resched()
- * and friends. Out of line due to being called infrequently.
- */
-void rcu_resched(void)
-{
- preempt_disable();
- __this_cpu_write(rcu_cond_resched_count, 0);
- rcu_note_context_switch(smp_processor_id());
- preempt_enable();
-}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b..bc1638b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@
int __sched _cond_resched(void)
{
- rcu_cond_resched();
if (should_resched()) {
__cond_resched();
return 1;
@@ -4166,18 +4165,15 @@
*/
int __cond_resched_lock(spinlock_t *lock)
{
- bool need_rcu_resched = rcu_should_resched();
int resched = should_resched();
int ret = 0;
lockdep_assert_held(lock);
- if (spin_needbreak(lock) || resched || need_rcu_resched) {
+ if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
__cond_resched();
- else if (unlikely(need_rcu_resched))
- rcu_resched();
else
cpu_relax();
ret = 1;
@@ -4191,7 +4187,6 @@
{
BUG_ON(!in_softirq());
- rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
if (should_resched()) {
local_bh_enable();
__cond_resched();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 695f977..627b3c3 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -608,7 +608,7 @@
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
- do_div(avg_atom, nr_switches);
+ avg_atom = div64_ul(avg_atom, nr_switches);
else
avg_atom = -1LL;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index cf009fb..658a58d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -147,8 +147,6 @@
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
goto use_default;
- trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
/*
* Enter the idle state previously returned by the governor decision.
* This function will block until an interrupt occurs and will take
@@ -156,8 +154,6 @@
*/
entered_state = cpuidle_enter(drv, dev, next_state);
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
-
if (broadcast)
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65..fe75444 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -585,9 +585,14 @@
struct itimerspec *new_setting,
struct itimerspec *old_setting)
{
+ ktime_t exp;
+
if (!rtcdev)
return -ENOTSUPP;
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (old_setting)
alarm_timer_get(timr, old_setting);
@@ -597,8 +602,16 @@
/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
- alarm_start(&timr->it.alarm.alarmtimer,
- timespec_to_ktime(new_setting->it_value));
+ exp = timespec_to_ktime(new_setting->it_value);
+ /* Convert (if necessary) to absolute time */
+ if (flags != TIMER_ABSTIME) {
+ ktime_t now;
+
+ now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+ exp = ktime_add(now, exp);
+ }
+
+ alarm_start(&timr->it.alarm.alarmtimer, exp);
return 0;
}
@@ -730,6 +743,9 @@
if (!alarmtimer_get_rtcdev())
return -ENOTSUPP;
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
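
Besides rejecting unknown flags, the timer-set path above now rebases relative requests onto the current time of the alarm base before starting the timer. A hedged sketch of that conversion, with ktime_t simplified to a signed 64-bit nanosecond count (flag value illustrative; use the uapi definition in real code):

#include <stdint.h>
#include <stdio.h>

#define TIMER_ABSTIME 0x01
typedef int64_t ktime_t;            /* simplified stand-in, nanoseconds */

static ktime_t effective_expiry(ktime_t requested, ktime_t now, int flags)
{
	if (flags & TIMER_ABSTIME)
		return requested;           /* caller already gave an absolute time */
	return now + requested;             /* rebase the relative request onto "now" */
}

int main(void)
{
	printf("%lld\n", (long long)effective_expiry(1000000, 5000000, 0));
	printf("%lld\n", (long long)effective_expiry(1000000, 5000000, TIMER_ABSTIME));
	return 0;
}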
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b372e3..ac9d1da 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -265,12 +265,12 @@
func = ftrace_ops_list_func;
}
+ update_function_graph_func();
+
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
- update_function_graph_func();
-
/*
* If we are using the list function, it doesn't care
* about the function_trace_ops.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d..ff70271 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -616,10 +616,6 @@
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
- if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
- (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
- return POLLIN | POLLRDNORM;
-
if (cpu == RING_BUFFER_ALL_CPUS)
work = &buffer->irq_work;
else {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f243444..291397e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -466,6 +466,12 @@
struct print_entry *entry;
unsigned long irq_flags;
int alloc;
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -475,7 +481,7 @@
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- irq_flags, preempt_count());
+ irq_flags, pc);
if (!event)
return 0;
@@ -492,6 +498,7 @@
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(buffer, irq_flags, 4, pc);
return size;
}
@@ -509,6 +516,12 @@
struct bputs_entry *entry;
unsigned long irq_flags;
int size = sizeof(struct bputs_entry);
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -516,7 +529,7 @@
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- irq_flags, preempt_count());
+ irq_flags, pc);
if (!event)
return 0;
@@ -525,6 +538,7 @@
entry->str = str;
__buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(buffer, irq_flags, 4, pc);
return 1;
}
@@ -809,7 +823,7 @@
{ trace_clock_local, "local", 1 },
{ trace_clock_global, "global", 1 },
{ trace_clock_counter, "counter", 0 },
- { trace_clock_jiffies, "uptime", 1 },
+ { trace_clock_jiffies, "uptime", 0 },
{ trace_clock, "perf", 1 },
ARCH_TRACE_CLOCKS
};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348..57b67b1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@
/*
* trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and if we are affected
+ * the worst case is an obviously bogus timestamp on a trace
+ * event - i.e. not life threatening.
*/
u64 notrace trace_clock_jiffies(void)
{
- u64 jiffy = jiffies - INITIAL_JIFFIES;
-
- /* Return nsecs */
- return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+ return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f99e0b3..2de53628 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@
list_del(&file->list);
remove_subsystem(file->system);
+ free_event_filter(file->filter);
kmem_cache_free(file_cachep, file);
}
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230..b6513a9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@
i %= num_online_cpus();
- if (!cpumask_of_node(numa_node)) {
+ if (numa_node == -1 || !cpumask_of_node(numa_node)) {
/* Use all online cpu's for non numa aware system */
cpumask_copy(mask, cpu_online_mask);
} else {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd..9221c02 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@
} else {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
+ entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e3..7211a73 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@
if (av == NULL) /* Not actually mapped anymore */
return;
- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = page_to_pgoff(page);
read_lock(&tasklist_lock);
for_each_process (tsk) {
struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@
mutex_lock(&mapping->i_mmap_mutex);
read_lock(&tasklist_lock);
for_each_process(tsk) {
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff_t pgoff = page_to_pgoff(page);
struct task_struct *t = task_early_kill(tsk, force_early);
if (!t)
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9f..7e8d820 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@
* if page by the offset is not ready to be mapped (cold cache or
* something).
*/
- if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+ if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+ fault_around_pages() > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
do_fault_around(vma, address, pte, pgoff, flags);
if (!pte_same(*pte, orig_pte))
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa..be6dbf9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@
* it. Otherwise, putback_lru_page() will drop the reference grabbed
* during isolation.
*/
- if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+ ClearPageSwapBacked(newpage);
put_new_page(newpage, private);
- else
+ } else
putback_lru_page(newpage);
if (result) {
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94eb..22a4a76 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- if (unlikely(is_vm_hugetlb_page(vma)))
- pgoff = page->index << huge_page_order(page_hstate(page));
-
+ pgoff_t pgoff = page_to_pgoff(page);
return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
@@ -1639,7 +1635,7 @@
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff_t pgoff = page_to_pgoff(page);
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
@@ -1680,7 +1676,7 @@
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
struct address_space *mapping = page->mapping;
- pgoff_t pgoff = page->index << compound_order(page);
+ pgoff_t pgoff = page_to_pgoff(page);
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49..af68b15 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@
* a time): we would prefer not to enlarge the shmem inode just for that.
*/
struct shmem_falloc {
- int mode; /* FALLOC_FL mode currently operating */
+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
pgoff_t start; /* start of range currently being fallocated */
pgoff_t next; /* the next page offset to be fallocated */
pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@
return;
index = start;
- for ( ; ; ) {
+ while (index < end) {
cond_resched();
pvec.nr = find_get_entries(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr) {
- if (index == start || unfalloc)
+ /* If all gone or hole-punch or unfalloc, we're done */
+ if (index == start || end != -1)
break;
+ /* But if truncating, restart to make sure all gone */
index = start;
continue;
}
- if ((index == start || unfalloc) && indices[0] >= end) {
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- break;
- }
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@
if (radix_tree_exceptional_entry(page)) {
if (unfalloc)
continue;
- nr_swaps_freed += !shmem_free_swap(mapping,
- index, page);
+ if (shmem_free_swap(mapping, index, page)) {
+ /* Swap was replaced by page: retry */
+ index--;
+ break;
+ }
+ nr_swaps_freed++;
continue;
}
@@ -506,6 +507,11 @@
if (page->mapping == mapping) {
VM_BUG_ON_PAGE(PageWriteback(page), page);
truncate_inode_page(mapping, page);
+ } else {
+ /* Page was replaced by swap: retry */
+ unlock_page(page);
+ index--;
+ break;
}
}
unlock_page(page);
@@ -760,7 +766,7 @@
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
if (shmem_falloc &&
- !shmem_falloc->mode &&
+ !shmem_falloc->waitq &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@
* Trinity finds that probing a hole which tmpfs is punching can
* prevent the hole-punch from ever completing: which in turn
* locks writers out with its hold on i_mutex. So refrain from
- * faulting pages into the hole while it's being punched, and
- * wait on i_mutex to be released if vmf->flags permits.
+ * faulting pages into the hole while it's being punched. Although
+ * shmem_undo_range() does remove the additions, it may be unable to
+ * keep up, as each new page needs its own unmap_mapping_range() call,
+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
+ *
+ * It does not matter if we sometimes reach this check just before the
+ * hole-punch begins, so that one fault then races with the punch:
+ * we just need to make racing faults a rare case.
+ *
+ * The implementation below would be much simpler if we just used a
+ * standard mutex or completion: but we cannot take i_mutex in fault,
+ * and bloating every shmem inode for this unlikely case would be sad.
*/
if (unlikely(inode->i_private)) {
struct shmem_falloc *shmem_falloc;
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
- if (!shmem_falloc ||
- shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
- vmf->pgoff < shmem_falloc->start ||
- vmf->pgoff >= shmem_falloc->next)
- shmem_falloc = NULL;
- spin_unlock(&inode->i_lock);
- /*
- * i_lock has protected us from taking shmem_falloc seriously
- * once return from shmem_fallocate() went back up that stack.
- * i_lock does not serialize with i_mutex at all, but it does
- * not matter if sometimes we wait unnecessarily, or sometimes
- * miss out on waiting: we just need to make those cases rare.
- */
- if (shmem_falloc) {
+ if (shmem_falloc &&
+ shmem_falloc->waitq &&
+ vmf->pgoff >= shmem_falloc->start &&
+ vmf->pgoff < shmem_falloc->next) {
+ wait_queue_head_t *shmem_falloc_waitq;
+ DEFINE_WAIT(shmem_fault_wait);
+
+ ret = VM_FAULT_NOPAGE;
if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* It's polite to up mmap_sem if we can */
up_read(&vma->vm_mm->mmap_sem);
- mutex_lock(&inode->i_mutex);
- mutex_unlock(&inode->i_mutex);
- return VM_FAULT_RETRY;
+ ret = VM_FAULT_RETRY;
}
- /* cond_resched? Leave that to GUP or return to user */
- return VM_FAULT_NOPAGE;
+
+ shmem_falloc_waitq = shmem_falloc->waitq;
+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+
+ /*
+ * shmem_falloc_waitq points into the shmem_fallocate()
+ * stack of the hole-punching task: shmem_falloc_waitq
+ * is usually invalid by the time we reach here, but
+ * finish_wait() does not dereference it in that case;
+ * though i_lock needed lest racing with wake_up_all().
+ */
+ spin_lock(&inode->i_lock);
+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+ spin_unlock(&inode->i_lock);
+ return ret;
}
+ spin_unlock(&inode->i_lock);
}
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@
mutex_lock(&inode->i_mutex);
- shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
if (mode & FALLOC_FL_PUNCH_HOLE) {
struct address_space *mapping = file->f_mapping;
loff_t unmap_start = round_up(offset, PAGE_SIZE);
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+ shmem_falloc.waitq = &shmem_falloc_waitq;
shmem_falloc.start = unmap_start >> PAGE_SHIFT;
shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@
1 + unmap_end - unmap_start, 0);
shmem_truncate_range(inode, offset, offset + len - 1);
/* No need to unmap again: hole-punching leaves COWed pages */
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&shmem_falloc_waitq);
+ spin_unlock(&inode->i_lock);
error = 0;
- goto undone;
+ goto out;
}
/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@
goto out;
}
+ shmem_falloc.waitq = NULL;
shmem_falloc.start = start;
shmem_falloc.next = start;
shmem_falloc.nr_falloced = 0;
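The shmem hunks above replace the old i_mutex dance with a wait queue that lives on the hole-puncher's stack and is published through inode->i_private, waiters being woken by wake_up_all() under i_lock. As a rough userspace analogy of that handshake only (pthread-based, hypothetical names; the real code must additionally cope with the waitq living on another task's stack):

/* gcc -o punch_gate punch_gate.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct punch_gate {
	pthread_mutex_t lock;		/* stands in for inode->i_lock */
	pthread_cond_t  done;		/* stands in for the on-stack waitq */
	bool		punching;	/* stands in for inode->i_private */
};

static struct punch_gate gate = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done = PTHREAD_COND_INITIALIZER,
};

static void *fault_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&gate.lock);
	while (gate.punching)			/* back off while a punch runs */
		pthread_cond_wait(&gate.done, &gate.lock);
	pthread_mutex_unlock(&gate.lock);
	puts("fault proceeds");
	return NULL;
}

static void *punch_hole(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&gate.lock);
	gate.punching = true;
	pthread_mutex_unlock(&gate.lock);

	/* ... unmap and truncate the range ... */

	pthread_mutex_lock(&gate.lock);
	gate.punching = false;
	pthread_cond_broadcast(&gate.done);	/* analogous to wake_up_all() */
	pthread_mutex_unlock(&gate.lock);
	return NULL;
}

int main(void)
{
	pthread_t p, f;

	pthread_create(&p, NULL, punch_hole, NULL);
	pthread_create(&f, NULL, fault_path, NULL);
	pthread_join(p, NULL);
	pthread_join(f, NULL);
	return 0;
}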
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a..d31c4ba 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@
continue;
}
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
if (!strcmp(s->name, name)) {
pr_err("%s (%s): Cache name already exists.\n",
__func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c81..eda2473 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@
for ( ; ; ) {
cond_resched();
if (!pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- indices)) {
+ min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+ /* If all gone from start onwards, we're done */
if (index == start)
break;
+ /* Otherwise restart to make sure all gone */
index = start;
continue;
}
if (index == start && indices[0] >= end) {
+ /* All gone out of hole to be punched, we're done */
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
break;
@@ -373,8 +375,11 @@
/* We rely upon deletion not changing page->index */
index = indices[i];
- if (index >= end)
+ if (index >= end) {
+ /* Restart punch to make sure all gone */
+ index = start - 1;
break;
+ }
if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ad2ac3c..dd11f61 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,8 +627,6 @@
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
int i;
- free_percpu(vlan->vlan_pcpu_stats);
- vlan->vlan_pcpu_stats = NULL;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
};
+static void vlan_dev_free(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
+ free_netdev(dev);
+}
+
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -794,7 +801,7 @@
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 01a1082..bfcf6be 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@
goto drop;
/* Queue packet (standard) */
- skb->sk = sock;
-
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;
@@ -1644,7 +1642,6 @@
if (!skb)
goto out;
- skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec..a957c81 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->bla.claim_dest;
- /* check if it is a claim packet in general */
- if (memcmp(bla_dst->magic, bla_dst_own->magic,
- sizeof(bla_dst->magic)) != 0)
- return 0;
-
/* if announcement packet, use the source,
* otherwise assume it is in the hw_src
*/
@@ -866,12 +861,13 @@
struct batadv_hard_iface *primary_if,
struct sk_buff *skb)
{
- struct batadv_bla_claim_dst *bla_dst;
+ struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
uint8_t *hw_src, *hw_dst;
- struct vlan_ethhdr *vhdr;
+ struct vlan_hdr *vhdr, vhdr_buf;
struct ethhdr *ethhdr;
struct arphdr *arphdr;
unsigned short vid;
+ int vlan_depth = 0;
__be16 proto;
int headlen;
int ret;
@@ -882,9 +878,24 @@
proto = ethhdr->h_proto;
headlen = ETH_HLEN;
if (vid & BATADV_VLAN_HAS_TAG) {
- vhdr = vlan_eth_hdr(skb);
- proto = vhdr->h_vlan_encapsulated_proto;
- headlen += VLAN_HLEN;
+ /* Traverse the VLAN/Ethertypes.
+ *
+ * At this point it is known that the first protocol is a VLAN
+ * header, so start checking at the encapsulated protocol.
+ *
+ * The depth of the VLAN headers is recorded to drop BLA claim
+ * frames encapsulated into multiple VLAN headers (QinQ).
+ */
+ do {
+ vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+ &vhdr_buf);
+ if (!vhdr)
+ return 0;
+
+ proto = vhdr->h_vlan_encapsulated_proto;
+ headlen += VLAN_HLEN;
+ vlan_depth++;
+ } while (proto == htons(ETH_P_8021Q));
}
if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@
hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
hw_dst = hw_src + ETH_ALEN + 4;
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+ bla_dst_own = &bat_priv->bla.claim_dest;
+
+ /* check if it is a claim frame in general */
+ if (memcmp(bla_dst->magic, bla_dst_own->magic,
+ sizeof(bla_dst->magic)) != 0)
+ return 0;
+
+ /* check if there is a claim frame encapsulated deeper in (QinQ) and
+ * drop that, as this is not supported by BLA but should also not be
+ * sent via the mesh.
+ */
+ if (vlan_depth > 1)
+ return 1;
/* check if it is a claim frame. */
ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e7ee65d..cbd677f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@
* possibly free it
* @softif_vlan: the vlan object to release
*/
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
{
- if (atomic_dec_and_test(&softif_vlan->refcount))
- kfree_rcu(softif_vlan, rcu);
+ if (atomic_dec_and_test(&vlan->refcount)) {
+ spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+ hlist_del_rcu(&vlan->list);
+ spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+ kfree_rcu(vlan, rcu);
+ }
}
/**
@@ -505,6 +510,7 @@
if (!vlan)
return -ENOMEM;
+ vlan->bat_priv = bat_priv;
vlan->vid = vid;
atomic_set(&vlan->refcount, 1);
@@ -516,6 +522,10 @@
return err;
}
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
@@ -523,10 +533,6 @@
bat_priv->soft_iface->dev_addr, vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
return 0;
}
@@ -538,18 +544,13 @@
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
struct batadv_softif_vlan *vlan)
{
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- hlist_del_rcu(&vlan->list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
- batadv_sysfs_del_vlan(bat_priv, vlan);
-
/* explicitly remove the associated TT local entry because it is marked
* with the NOPURGE flag
*/
batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
vlan->vid, "vlan interface destroyed", false);
+ batadv_sysfs_del_vlan(bat_priv, vlan);
batadv_softif_vlan_free_ref(vlan);
}
@@ -567,6 +568,8 @@
unsigned short vid)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
+ int ret;
/* only 802.1Q vlans are supported.
* batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@
vid |= BATADV_VLAN_HAS_TAG;
- return batadv_softif_create_vlan(bat_priv, vid);
+ /* if a new vlan is getting created and it already exists, it means that
+ * it was not deleted yet. batadv_softif_vlan_get() increases the
+ * refcount in order to revive the object.
+ *
+ * if it does not exist then create it.
+ */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ return batadv_softif_create_vlan(bat_priv, vid);
+
+ /* recreate the sysfs object if it was already destroyed (and it should
+	 * be since we received a kill_vid() for this vlan)
+ */
+ if (!vlan->kobj) {
+ ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+ if (ret) {
+ batadv_softif_vlan_free_ref(vlan);
+ return ret;
+ }
+ }
+
+ /* add a new TT local entry. This one will be marked with the NOPURGE
+ * flag. This must be added again, even if the vlan object already
+ * exists, because the entry was deleted by kill_vid()
+ */
+ batadv_tt_local_add(bat_priv->soft_iface,
+ bat_priv->soft_iface->dev_addr, vid,
+ BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+ return 0;
}
/**
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde..5f59e7f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_tt_local_entry *tt_local;
struct batadv_tt_global_entry *tt_global = NULL;
+ struct batadv_softif_vlan *vlan;
struct net_device *in_dev = NULL;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@
if (!tt_local)
goto out;
+ /* increase the refcounter of the related vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_free_ref(tt_local);
+ batadv_softif_vlan_free_ref(vlan);
goto out;
}
@@ -1009,6 +1014,7 @@
{
struct batadv_tt_local_entry *tt_local_entry;
uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+ struct batadv_softif_vlan *vlan;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1039,6 +1045,11 @@
hlist_del_rcu(&tt_local_entry->common.hash_entry);
batadv_tt_local_entry_free_ref(tt_local_entry);
+ /* decrease the reference held for this vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+
out:
if (tt_local_entry)
batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
+ struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -1131,6 +1143,13 @@
tt_local = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
+
+ /* decrease the reference held for this vlan */
+ vlan = batadv_softif_vlan_get(bat_priv,
+ tt_common_entry->vid);
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+
batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_local_entry *tt_local;
+ struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@
tt_local = container_of(tt_common,
struct batadv_tt_local_entry,
common);
+
+ /* decrease the reference held for this vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+
batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a5..8854c05 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@
/**
* struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
* @vid: VLAN identifier
* @kobj: kobject for sysfs vlan subdirectory
* @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@
* @rcu: struct used for freeing in a RCU-safe manner
*/
struct batadv_softif_vlan {
+ struct batadv_priv *bat_priv;
unsigned short vid;
struct kobject *kobj;
atomic_t ap_isolation; /* boolean */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ca01d18..a7a27bc 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -289,10 +289,20 @@
{
struct hci_conn *conn = container_of(work, struct hci_conn,
disc_work.work);
+ int refcnt = atomic_read(&conn->refcnt);
BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
- if (atomic_read(&conn->refcnt))
+ WARN_ON(refcnt < 0);
+
+	/* FIXME: It was observed that in a failed pairing scenario, refcnt
+ * drops below 0. Probably this is because l2cap_conn_del calls
+ * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
+ * dropped. After that loop hci_chan_del is called which also drops
+	 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
+ * otherwise drop it.
+ */
+ if (refcnt > 0)
return;
switch (conn->state) {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index f2829a7..e33a982 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -385,6 +385,16 @@
{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
};
+static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
+{
+ /* If either side has unknown io_caps, use JUST WORKS */
+ if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
+ remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ return JUST_WORKS;
+
+ return gen_method[remote_io][local_io];
+}
+
static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
u8 local_io, u8 remote_io)
{
@@ -401,14 +411,11 @@
BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
/* If neither side wants MITM, use JUST WORKS */
- /* If either side has unknown io_caps, use JUST WORKS */
/* Otherwise, look up method from the table */
- if (!(auth & SMP_AUTH_MITM) ||
- local_io > SMP_IO_KEYBOARD_DISPLAY ||
- remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ if (!(auth & SMP_AUTH_MITM))
method = JUST_WORKS;
else
- method = gen_method[remote_io][local_io];
+ method = get_auth_method(smp, local_io, remote_io);
/* If not bonding, don't ask user to confirm a Zero TK */
if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -669,7 +676,7 @@
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct smp_chan *smp;
- u8 key_size, auth;
+ u8 key_size, auth, sec_level;
int ret;
BT_DBG("conn %p", conn);
@@ -695,7 +702,19 @@
/* We didn't start the pairing, so match remote */
auth = req->auth_req;
- conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+ sec_level = authreq_to_seclevel(auth);
+ if (sec_level > conn->hcon->pending_sec_level)
+ conn->hcon->pending_sec_level = sec_level;
+
+	/* If we need MITM, check that it can be achieved */
+ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+ u8 method;
+
+ method = get_auth_method(smp, conn->hcon->io_capability,
+ req->io_capability);
+ if (method == JUST_WORKS || method == JUST_CFM)
+ return SMP_AUTH_REQUIREMENTS;
+ }
build_pairing_cmd(conn, req, &rsp, auth);
@@ -743,6 +762,16 @@
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
+	/* If we need MITM, check that it can be achieved */
+ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+ u8 method;
+
+ method = get_auth_method(smp, req->io_capability,
+ rsp->io_capability);
+ if (method == JUST_WORKS || method == JUST_CFM)
+ return SMP_AUTH_REQUIREMENTS;
+ }
+
get_random_bytes(smp->prnd, sizeof(smp->prnd));
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -838,6 +867,7 @@
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
struct smp_chan *smp;
+ u8 sec_level;
BT_DBG("conn %p", conn);
@@ -847,7 +877,9 @@
if (!(conn->hcon->link_mode & HCI_LM_MASTER))
return SMP_CMD_NOTSUPP;
- hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+ sec_level = authreq_to_seclevel(rp->auth_req);
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
@@ -901,9 +933,12 @@
if (smp_sufficient_security(hcon, sec_level))
return 1;
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
+
if (hcon->link_mode & HCI_LM_MASTER)
- if (smp_ltk_encrypt(conn, sec_level))
- goto done;
+ if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+ return 0;
if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
return 0;
@@ -918,7 +953,7 @@
* requires it.
*/
if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
- sec_level > BT_SECURITY_MEDIUM)
+ hcon->pending_sec_level > BT_SECURITY_MEDIUM)
authreq |= SMP_AUTH_MITM;
if (hcon->link_mode & HCI_LM_MASTER) {
@@ -937,9 +972,6 @@
set_bit(SMP_FLAG_INITIATOR, &smp->flags);
-done:
- hcon->pending_sec_level = sec_level;
-
return 0;
}
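get_auth_method() above falls back to Just Works whenever either IO capability lies outside the pairing-method table. A standalone sketch of that bounds-check-then-lookup pattern (the table contents and names are invented for illustration, not the SMP gen_method table):

#include <stdio.h>

enum method { JUST_WORKS, JUST_CFM, REQ_PASSKEY, CFM_PASSKEY };

#define IO_MAX 2	/* hypothetical: only capabilities 0..2 are defined */

static const enum method method_tbl[IO_MAX + 1][IO_MAX + 1] = {
	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY },
	{ JUST_CFM,    JUST_CFM,    REQ_PASSKEY },
	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY },
};

static enum method pick_method(unsigned int local_io, unsigned int remote_io)
{
	/* An out-of-range capability means "unknown": fall back to Just Works */
	if (local_io > IO_MAX || remote_io > IO_MAX)
		return JUST_WORKS;

	return method_tbl[remote_io][local_io];
}

int main(void)
{
	printf("%d\n", pick_method(1, 5));	/* unknown remote -> 0 (JUST_WORKS) */
	printf("%d\n", pick_method(2, 1));	/* normal lookup  -> 2 (REQ_PASSKEY) */
	return 0;
}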
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf6..367a586 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@
static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+ struct net_device *dev,
+ struct netdev_notifier_info *info);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
- call_netdevice_notifiers(NETDEV_CHANGE, dev);
+ struct netdev_notifier_change_info change_info;
+
+ change_info.flags_changed = 0;
+ call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+ &change_info.info);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
}
}
@@ -4089,6 +4096,8 @@
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+ skb->encapsulation = 0;
+ skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
napi->skb = skb;
@@ -4227,9 +4236,8 @@
#endif
napi->weight = weight_p;
local_irq_disable();
- while (work < quota) {
+ while (1) {
struct sk_buff *skb;
- unsigned int qlen;
while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
@@ -4243,24 +4251,24 @@
}
rps_lock(sd);
- qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen)
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
- &sd->process_queue);
-
- if (qlen < quota - work) {
+ if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
* only current cpu owns and manipulates this napi,
- * and NAPI_STATE_SCHED is the only possible flag set on backlog.
- * we can use a plain write instead of clear_bit(),
+ * and NAPI_STATE_SCHED is the only possible flag set
+ * on backlog.
+ * We can use a plain write instead of clear_bit(),
* and we dont need an smp_mb() memory barrier.
*/
list_del(&napi->poll_list);
napi->state = 0;
+ rps_unlock(sd);
- quota = work + qlen;
+ break;
}
+
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
rps_unlock(sd);
}
local_irq_enable();
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872e..559890b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3059,11 +3059,12 @@
memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
+ struct neigh_table *tbl = p->tbl;
dev_name_source = "default";
- t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
- t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
- t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
- t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
}
if (handler) {
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9acec61f..dd8696a 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,7 @@
goto put;
memcpy(*_result, upayload->data, len);
- *_result[len] = '\0';
+ (*_result)[len] = '\0';
if (_expiry)
*_expiry = rkey->expiry;
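The dns_query fix above is an operator-precedence bug: *_result[len] parses as *(_result[len]), so the terminating NUL went through the wrong pointer. A minimal standalone illustration (hypothetical names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void copy_result(char **result, const char *payload, size_t len)
{
	*result = malloc(len + 1);
	if (!*result)
		return;
	memcpy(*result, payload, len);
	(*result)[len] = '\0';	/* terminate the newly allocated buffer */
	/* *result[len] would mean *(result[len]): it indexes 'result' as an
	 * array of pointers and dereferences the len-th one instead.
	 */
}

int main(void)
{
	char *out = NULL;

	copy_result(&out, "example", 7);
	if (out)
		puts(out);
	free(out);
	return 0;
}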
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836..d156b3c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@
int proto = iph->protocol;
int err = -ENOSYS;
+ if (skb->encapsulation)
+ skb_set_inner_network_header(skb, nhoff);
+
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619b..0485bf7 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@
skb_push(skb, hdr_len);
+ skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = tnl_flags_to_gre_flags(tpi->flags);
greh->protocol = tpi->proto;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb..f0bdd47 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@
int err = -ENOENT;
__be16 type;
+ skb->encapsulation = 1;
+ skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
type = greh->protocol;
if (greh->flags & GRE_KEY)
grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d94..42b7bcf 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@
/* fall through */
case 0:
info = ntohs(icmph->un.frag.mtu);
- if (!info)
- goto out;
}
break;
case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d42..db710b0 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
+ if (!in_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@
*imlp = iml->next_rcu;
- if (in_dev)
- ip_mc_dec_group(in_dev, group);
+ ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
- if (!in_dev)
- ret = -ENODEV;
+out:
rtnl_unlock();
return ret;
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aece..ad38249 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@
optptr++;
continue;
}
+ if (unlikely(l < 2)) {
+ pp_ptr = optptr;
+ goto error;
+ }
optlen = optptr[1];
if (optlen < 2 || optlen > l) {
pp_ptr = optptr;
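The ip_options hunk above makes sure at least two bytes remain before the option length at optptr[1] is read. A simplified standalone sketch of the same TLV-walk guard (generic, not the kernel parser):

#include <stdio.h>
#include <stddef.h>

/* Walk type/length/value options; return 0 on success, -1 if malformed. */
static int walk_tlv(const unsigned char *opt, size_t l)
{
	while (l > 0) {
		unsigned char type = opt[0];

		if (type == 0)		/* end of options */
			return 0;
		if (type == 1) {	/* single-byte padding, no length field */
			opt++;
			l--;
			continue;
		}
		if (l < 2)		/* no room for the length byte itself */
			return -1;

		size_t optlen = opt[1];

		if (optlen < 2 || optlen > l)
			return -1;
		/* ... handle opt[2 .. optlen - 1] ... */
		opt += optlen;
		l -= optlen;
	}
	return 0;
}

int main(void)
{
	unsigned char good[] = { 1, 1, 7, 3, 4, 0 };
	unsigned char bad[]  = { 1, 7 };	/* option type with nothing after it */

	printf("%d %d\n", walk_tlv(good, sizeof(good)),
			  walk_tlv(bad, sizeof(bad)));	/* prints: 0 -1 */
	return 0;
}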
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 54b6731..6f9de61 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -169,6 +169,7 @@
hlist_for_each_entry_rcu(t, head, hash_node) {
if (remote != t->parms.iph.daddr ||
+ t->parms.iph.saddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
@@ -185,10 +186,11 @@
head = &itn->tunnels[hash];
hlist_for_each_entry_rcu(t, head, hash_node) {
- if ((local != t->parms.iph.saddr &&
- (local != t->parms.iph.daddr ||
- !ipv4_is_multicast(local))) ||
- !(t->dev->flags & IFF_UP))
+ if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+ (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+ continue;
+
+ if (!(t->dev->flags & IFF_UP))
continue;
if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@
hlist_for_each_entry_rcu(t, head, hash_node) {
if (t->parms.i_key != key ||
+ t->parms.iph.saddr != 0 ||
+ t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239f..3162ea9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1010,7 +1010,7 @@
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
- struct dst_entry *dst;
+ struct dst_entry *odst = NULL;
bool new = false;
bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@
if (!ip_sk_accept_pmtu(sk))
goto out;
- rt = (struct rtable *) __sk_dst_get(sk);
+ odst = sk_dst_get(sk);
- if (sock_owned_by_user(sk) || !rt) {
+ if (sock_owned_by_user(sk) || !odst) {
__ipv4_sk_update_pmtu(skb, sk, mtu);
goto out;
}
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
- if (!__sk_dst_check(sk, 0)) {
+ rt = (struct rtable *)odst;
+ if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
@@ -1037,8 +1038,7 @@
__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
- dst = dst_check(&rt->dst, 0);
- if (!dst) {
+ if (!dst_check(&rt->dst, 0)) {
if (new)
dst_release(&rt->dst);
@@ -1050,10 +1050,11 @@
}
if (new)
- __sk_dst_set(sk, &rt->dst);
+ sk_dst_set(sk, &rt->dst);
out:
bh_unlock_sock(sk);
+ dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde3..9d2118e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@
if (unlikely(tp->repair)) {
if (tp->repair_queue == TCP_RECV_QUEUE) {
copied = tcp_send_rcvq(sk, msg, size);
- goto out;
+ goto out_nopush;
}
err = -EINVAL;
@@ -1282,6 +1282,7 @@
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
release_sock(sk);
return copied + copied_syn;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c2375..40639c2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@
}
/* D-SACK for already forgotten data... Do dumb counting. */
- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
@@ -1187,7 +1187,7 @@
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
- if (tp->undo_marker && tp->undo_retrans &&
+ if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@
tp->lost_out = 0;
tp->undo_marker = 0;
- tp->undo_retrans = 0;
+ tp->undo_retrans = -1;
}
void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@
tp->prior_ssthresh = 0;
tp->undo_marker = tp->snd_una;
- tp->undo_retrans = tp->retrans_out;
+ tp->undo_retrans = tp->retrans_out ? : -1;
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
if (!ece_ack)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59..55046ec 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
return tcp_gro_complete(skb);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d92bce0..179b51e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@
if (!tp->retrans_stamp)
tp->retrans_stamp = TCP_SKB_CB(skb)->when;
- tp->undo_retrans += tcp_skb_pcount(skb);
-
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
*/
@@ -2534,6 +2532,10 @@
} else if (err != -EBUSY) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
+
+ if (tp->undo_retrans < 0)
+ tp->undo_retrans = 0;
+ tp->undo_retrans += tcp_skb_pcount(skb);
return err;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d92f94b..7d5a866 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1588,8 +1588,11 @@
goto csum_error;
- if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ is_udplite);
goto drop;
+ }
rc = 0;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c..617f095 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@
len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
len -= skb_network_header_len(skb);
- /* Drop queries with not link local source */
- if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+ /* RFC3810 6.2
+ * Upon reception of an MLD message that contains a Query, the node
+ * checks if the source address of the message is a valid link-local
+ * address, if the Hop Limit is set to 1, and if the Router Alert
+ * option is present in the Hop-By-Hop Options header of the IPv6
+ * packet. If any of these checks fails, the packet is dropped.
+ */
+ if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+ ipv6_hdr(skb)->hop_limit != 1 ||
+ !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+ IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
return -EINVAL;
idev = __in6_dev_get(skb->dev);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3c..01b0ff9 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@
th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
&iph->daddr, 0);
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
return tcp_gro_complete(skb);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 95c8347..7092ff7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -674,8 +674,11 @@
goto csum_error;
}
- if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
+ }
skb_dst_drop(skb);
@@ -690,6 +693,7 @@
bh_unlock_sock(sk);
return rc;
+
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f..13752d9 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@
int err;
if (level != SOL_PPPOL2TP)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+ return -EINVAL;
if (optlen < sizeof(int))
return -EINVAL;
@@ -1491,7 +1491,7 @@
struct pppol2tp_session *ps;
if (level != SOL_PPPOL2TP)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+ return -EINVAL;
if (get_user(len, optlen))
return -EFAULT;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601..a6cda52 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@
int err;
/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+ 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
if (!skb)
return;
- skb_reserve(skb, local->hw.extra_tx_headroom);
+ skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
memset(mgmt, 0, 24 + 6);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566c..8746ff9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@
{
INIT_LIST_HEAD(&afi->tables);
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_add_tail(&afi->list, &net->nft.af_info);
+ list_add_tail_rcu(&afi->list, &net->nft.af_info);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
@@ -51,7 +51,7 @@
void nft_unregister_afinfo(struct nft_af_info *afi)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del(&afi->list);
+ list_del_rcu(&afi->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (family != NFPROTO_UNSPEC && family != afi->family)
continue;
- list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
if (idx < s_idx)
goto cont;
if (idx > s_idx)
@@ -294,11 +297,14 @@
NLM_F_MULTI,
afi->family, table) < 0)
goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
done:
+ rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
@@ -407,6 +413,9 @@
if (flags & ~NFT_TABLE_F_DORMANT)
return -EINVAL;
+ if (flags == ctx->table->flags)
+ return 0;
+
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
sizeof(struct nft_trans_table));
if (trans == NULL)
@@ -514,7 +523,7 @@
module_put(afi->owner);
return err;
}
- list_add_tail(&table->list, &afi->tables);
+ list_add_tail_rcu(&table->list, &afi->tables);
return 0;
}
@@ -546,7 +555,7 @@
if (err < 0)
return err;
- list_del(&table->list);
+ list_del_rcu(&table->list);
return 0;
}
@@ -635,13 +644,20 @@
{
struct nft_stats *cpu_stats, total;
struct nlattr *nest;
+ unsigned int seq;
+ u64 pkts, bytes;
int cpu;
memset(&total, 0, sizeof(total));
for_each_possible_cpu(cpu) {
cpu_stats = per_cpu_ptr(stats, cpu);
- total.pkts += cpu_stats->pkts;
- total.bytes += cpu_stats->bytes;
+ do {
+ seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ pkts = cpu_stats->pkts;
+ bytes = cpu_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+ total.pkts += pkts;
+ total.bytes += bytes;
}
nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
if (nest == NULL)
@@ -761,12 +777,15 @@
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (family != NFPROTO_UNSPEC && family != afi->family)
continue;
- list_for_each_entry(table, &afi->tables, list) {
- list_for_each_entry(chain, &table->chains, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
+ list_for_each_entry_rcu(chain, &table->chains, list) {
if (idx < s_idx)
goto cont;
if (idx > s_idx)
@@ -778,17 +797,19 @@
NLM_F_MULTI,
afi->family, table, chain) < 0)
goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
}
done:
+ rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
-
static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -861,7 +882,7 @@
if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
return ERR_PTR(-EINVAL);
- newstats = alloc_percpu(struct nft_stats);
+ newstats = netdev_alloc_pcpu_stats(struct nft_stats);
if (newstats == NULL)
return ERR_PTR(-ENOMEM);
@@ -1077,7 +1098,7 @@
}
basechain->stats = stats;
} else {
- stats = alloc_percpu(struct nft_stats);
+ stats = netdev_alloc_pcpu_stats(struct nft_stats);
if (IS_ERR(stats)) {
module_put(type->owner);
kfree(basechain);
@@ -1130,7 +1151,7 @@
goto err2;
table->use++;
- list_add_tail(&chain->list, &table->chains);
+ list_add_tail_rcu(&chain->list, &table->chains);
return 0;
err2:
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@
return err;
table->use--;
- list_del(&chain->list);
+ list_del_rcu(&chain->list);
return 0;
}
@@ -1199,9 +1220,9 @@
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (type->family == NFPROTO_UNSPEC)
- list_add_tail(&type->list, &nf_tables_expressions);
+ list_add_tail_rcu(&type->list, &nf_tables_expressions);
else
- list_add(&type->list, &nf_tables_expressions);
+ list_add_rcu(&type->list, &nf_tables_expressions);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
@@ -1216,7 +1237,7 @@
void nft_unregister_expr(struct nft_expr_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del(&type->list);
+ list_del_rcu(&type->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
- u8 genctr = ACCESS_ONCE(net->nft.genctr);
- u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (family != NFPROTO_UNSPEC && family != afi->family)
continue;
- list_for_each_entry(table, &afi->tables, list) {
- list_for_each_entry(chain, &table->chains, list) {
- list_for_each_entry(rule, &chain->rules, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
+ list_for_each_entry_rcu(chain, &table->chains, list) {
+ list_for_each_entry_rcu(rule, &chain->rules, list) {
if (!nft_rule_is_active(net, rule))
goto cont;
if (idx < s_idx)
@@ -1572,6 +1594,8 @@
NLM_F_MULTI | NLM_F_APPEND,
afi->family, table, chain, rule) < 0)
goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
@@ -1579,9 +1603,7 @@
}
}
done:
- /* Invalidate this dump, a transition to the new generation happened */
- if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
- return -EBUSY;
+ rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
@@ -1932,7 +1954,7 @@
int nft_register_set(struct nft_set_ops *ops)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_add_tail(&ops->list, &nf_tables_set_ops);
+ list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
@@ -1941,7 +1963,7 @@
void nft_unregister_set(struct nft_set_ops *ops)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del(&ops->list);
+ list_del_rcu(&ops->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@
if (cb->args[1])
return skb->len;
- list_for_each_entry(set, &ctx->table->sets, list) {
+ rcu_read_lock();
+ cb->seq = ctx->net->nft.base_seq;
+
+ list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@
cb->args[0] = idx;
goto done;
}
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
cb->args[1] = 1;
done:
+ rcu_read_unlock();
return skb->len;
}
@@ -2260,7 +2287,10 @@
if (cb->args[1])
return skb->len;
- list_for_each_entry(table, &ctx->afi->tables, list) {
+ rcu_read_lock();
+ cb->seq = ctx->net->nft.base_seq;
+
+ list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
if (cur_table) {
if (cur_table != table)
continue;
@@ -2269,7 +2299,7 @@
}
ctx->table = table;
idx = 0;
- list_for_each_entry(set, &ctx->table->sets, list) {
+ list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@
cb->args[2] = (unsigned long) table;
goto done;
}
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
cb->args[1] = 1;
done:
+ rcu_read_unlock();
return skb->len;
}
@@ -2300,7 +2332,10 @@
if (cb->args[1])
return skb->len;
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (cur_family) {
if (afi->family != cur_family)
continue;
@@ -2308,7 +2343,7 @@
cur_family = 0;
}
- list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
if (cur_table) {
if (cur_table != table)
continue;
@@ -2319,7 +2354,7 @@
ctx->table = table;
ctx->afi = afi;
idx = 0;
- list_for_each_entry(set, &ctx->table->sets, list) {
+ list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@
cb->args[3] = afi->family;
goto done;
}
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
@@ -2339,6 +2375,7 @@
}
cb->args[1] = 1;
done:
+ rcu_read_unlock();
return skb->len;
}
@@ -2597,7 +2634,7 @@
if (err < 0)
goto err2;
- list_add_tail(&set->list, &table->sets);
+ list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
@@ -2617,7 +2654,7 @@
static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
- list_del(&set->list);
+ list_del_rcu(&set->list);
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
nft_set_destroy(set);
}
@@ -2652,7 +2689,7 @@
if (err < 0)
return err;
- list_del(&set->list);
+ list_del_rcu(&set->list);
ctx.table->use--;
return 0;
}
@@ -2704,14 +2741,14 @@
}
bind:
binding->chain = ctx->chain;
- list_add_tail(&binding->list, &set->bindings);
+ list_add_tail_rcu(&binding->list, &set->bindings);
return 0;
}
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding)
{
- list_del(&binding->list);
+ list_del_rcu(&binding->list);
if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
!(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@
struct nft_set *set;
/* Bump generation counter, invalidate any dump in progress */
- net->nft.genctr++;
+ while (++net->nft.base_seq == 0);
/* A new generation has just started */
net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@
}
nft_trans_destroy(trans);
} else {
- list_del(&trans->ctx.table->list);
+ list_del_rcu(&trans->ctx.table->list);
}
break;
case NFT_MSG_DELTABLE:
- list_add_tail(&trans->ctx.table->list,
- &trans->ctx.afi->tables);
+ list_add_tail_rcu(&trans->ctx.table->list,
+ &trans->ctx.afi->tables);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@
nft_trans_destroy(trans);
} else {
trans->ctx.table->use--;
- list_del(&trans->ctx.chain->list);
+ list_del_rcu(&trans->ctx.chain->list);
if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
trans->ctx.chain->flags & NFT_BASE_CHAIN) {
nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@
break;
case NFT_MSG_DELCHAIN:
trans->ctx.table->use++;
- list_add_tail(&trans->ctx.chain->list,
- &trans->ctx.table->chains);
+ list_add_tail_rcu(&trans->ctx.chain->list,
+ &trans->ctx.table->chains);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
- list_del(&nft_trans_set(trans)->list);
+ list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
trans->ctx.table->use++;
- list_add_tail(&nft_trans_set(trans)->list,
- &trans->ctx.table->sets);
+ list_add_tail_rcu(&nft_trans_set(trans)->list,
+ &trans->ctx.table->sets);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@
{
INIT_LIST_HEAD(&net->nft.af_info);
INIT_LIST_HEAD(&net->nft.commit_list);
+ net->nft.base_seq = 1;
return 0;
}
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb..3b90eb2 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@
struct nft_data data[NFT_REG_MAX + 1];
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
- struct nft_stats __percpu *stats;
+ struct nft_stats *stats;
int rulenum;
/*
* Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@
nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
rcu_read_lock_bh();
- stats = rcu_dereference(nft_base_chain(basechain)->stats);
- __this_cpu_inc(stats->pkts);
- __this_cpu_add(stats->bytes, pkt->skb->len);
+ stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+ u64_stats_update_begin(&stats->syncp);
+ stats->pkts++;
+ stats->bytes += pkt->skb->len;
+ u64_stats_update_end(&stats->syncp);
rcu_read_unlock_bh();
return nft_base_chain(basechain)->policy;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f..e6fac7e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@
while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
- sk->sk_err = err;
+ sk->sk_err = -err;
sk->sk_error_report(sk);
break;
}
@@ -2483,7 +2483,7 @@
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
- sk->sk_err = ret;
+ sk->sk_err = -ret;
sk->sk_error_report(sk);
}
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a..e70d8b1 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@
case OVS_ACTION_ATTR_SAMPLE:
err = sample(dp, skb, a);
+ if (unlikely(err)) /* skb already freed. */
+ return err;
break;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bc..9db4bf6 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@
OVS_CB(skb)->flow = flow;
OVS_CB(skb)->pkt_key = &key;
- ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+ ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
ovs_execute_actions(dp, skb);
stats_counter = &stats->n_hit;
@@ -889,8 +889,11 @@
}
/* The unmasked key has to be the same for flow updates. */
if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
- error = -EEXIST;
- goto err_unlock_ovs;
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (!flow) {
+ error = -ENOENT;
+ goto err_unlock_ovs;
+ }
}
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@
goto err_unlock_ovs;
}
/* Check that the flow exists. */
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
error = -ENOENT;
goto err_unlock_ovs;
}
- /* The unmasked key has to be the same for flow updates. */
- if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
- error = -EEXIST;
- goto err_unlock_ovs;
- }
+
/* Update actions, if present. */
if (likely(acts)) {
old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@
goto unlock;
}
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
- if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (!flow) {
err = -ENOENT;
goto unlock;
}
@@ -1113,8 +1112,8 @@
goto unlock;
}
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
- if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (unlikely(!flow)) {
err = -ENOENT;
goto unlock;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751c..d07ab53 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+ struct sk_buff *skb)
{
struct flow_stats *stats;
- __be16 tcp_flags = flow->key.tp.flags;
int node = numa_node_id();
stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2..5e5aaed 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@
unsigned char ar_tip[4]; /* target IP address */
} __packed;
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+ struct sk_buff *);
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
unsigned long *used, __be16 *tcp_flags);
void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3ab..cf2d853 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@
return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+ struct sw_flow_match *match)
+{
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+
+ /* Always called under ovs-mutex. */
+ list_for_each_entry(mask, &tbl->mask_list, list) {
+ flow = masked_flow_lookup(ti, match->key, mask);
+ if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
+ return flow;
+ }
+ return NULL;
+}
+
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct sw_flow_mask *mask;
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a582..5918bff 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@
u32 *n_mask_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+ struct sw_flow_match *match);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
struct sw_flow_match *match);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fe..f49148a 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@
return PACKET_RCVD;
}
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+ const struct tnl_ptk_info *tpi)
+{
+ struct ovs_net *ovs_net;
+ struct vport *vport;
+
+ ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+ vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+ if (unlikely(!vport))
+ return PACKET_REJECT;
+ else
+ return PACKET_RCVD;
+}
+
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@
static struct gre_cisco_protocol gre_protocol = {
.handler = gre_rcv,
+ .err_handler = gre_err,
.priority = 1,
};
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583..70c0be8 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
@@ -460,17 +461,25 @@
return 0;
}
+#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
- unsigned int i = 0x7FF;
+ unsigned long i;
+ unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!bitmap)
+ return handle | 0xFFF;
for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
- if (i < TC_U32_NODE(n->handle))
- i = TC_U32_NODE(n->handle);
- i++;
+ set_bit(TC_U32_NODE(n->handle), bitmap);
- return handle | (i > 0xFFF ? 0xFFF : i);
+ i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+ if (i >= NR_U32_NODE)
+ i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+ kfree(bitmap);
+ return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
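The cls_u32 hunk above switches handle allocation to a bitmap so freed node ids can be reused: the first free id at or above 0x800 is preferred, then the 1..0x7FF range, and 0xFFF signals exhaustion. A simplified userspace version of that policy (plain arrays instead of the kernel bitmap helpers, and without the hash part of the handle):

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#define NR_NODE (1 << 12)	/* node ids 1 .. 0xFFE are allocatable */

/* Return the first free id >= from, or NR_NODE if there is none. */
static unsigned int next_free(const bool *used, unsigned int from)
{
	for (unsigned int i = from; i < NR_NODE; i++)
		if (!used[i])
			return i;
	return NR_NODE;
}

static unsigned int alloc_id(const bool *used)
{
	unsigned int i = next_free(used, 0x800);	/* preferred range first */

	if (i >= NR_NODE)
		i = next_free(used, 1);			/* then the low range */

	return i >= NR_NODE ? 0xFFF : i;		/* 0xFFF: nothing free */
}

int main(void)
{
	static bool used[NR_NODE];

	used[0x800] = true;
	printf("0x%x\n", alloc_id(used));	/* 0x801 */

	memset(used, true, sizeof(used));
	printf("0x%x\n", alloc_id(used));	/* 0xfff */
	return 0;
}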
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c6465..b6842fd 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
- const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, __u16 flags,
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
- 0, gfp);
+ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
/* Pull off the rest of the cause TLV from the chunk. */
skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
- sre = (struct sctp_remote_error *)
- skb_push(skb, sizeof(struct sctp_remote_error));
+ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
/* Trim the buffer to the right length. */
- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+ skb_trim(skb, sizeof(*sre) + elen);
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_type:
- * It should be SCTP_REMOTE_ERROR.
- */
+ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+ memset(sre, 0, sizeof(*sre));
sre->sre_type = SCTP_REMOTE_ERROR;
-
- /*
- * Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_flags: 16 bits (unsigned integer)
- * Currently unused.
- */
sre->sre_flags = 0;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_length: sizeof (__u32)
- *
- * This field is the total length of the notification data,
- * including the notification header.
- */
sre->sre_length = skb->len;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_error: 16 bits (unsigned integer)
- * This value represents one of the Operational Error causes defined in
- * the SCTP specification, in network byte order.
- */
sre->sre_error = cause;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association id field, holds the identifier for the association.
- * All notifications for a given association have the same association
- * identifier. For TCP style socket, this field is ignored.
- */
sctp_ulpevent_set_owner(event, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
-
fail:
return NULL;
}
@@ -899,7 +858,9 @@
return notification->sn_header.sn_type;
}
-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
@@ -908,74 +869,21 @@
if (sctp_ulpevent_is_notification(event))
return;
- /* Sockets API Extensions for SCTP
- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
- *
- * sinfo_stream: 16 bits (unsigned integer)
- *
- * For recvmsg() the SCTP stack places the message's stream number in
- * this value.
- */
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.sinfo_stream = event->stream;
- /* sinfo_ssn: 16 bits (unsigned integer)
- *
- * For recvmsg() this value contains the stream sequence number that
- * the remote endpoint placed in the DATA chunk. For fragmented
- * messages this is the same number for all deliveries of the message
- * (if more than one recvmsg() is needed to read the message).
- */
sinfo.sinfo_ssn = event->ssn;
- /* sinfo_ppid: 32 bits (unsigned integer)
- *
- * In recvmsg() this value is
- * the same information that was passed by the upper layer in the peer
- * application. Please note that byte order issues are NOT accounted
- * for and this information is passed opaquely by the SCTP stack from
- * one end to the other.
- */
sinfo.sinfo_ppid = event->ppid;
- /* sinfo_flags: 16 bits (unsigned integer)
- *
- * This field may contain any of the following flags and is composed of
- * a bitwise OR of these values.
- *
- * recvmsg() flags:
- *
- * SCTP_UNORDERED - This flag is present when the message was sent
- * non-ordered.
- */
sinfo.sinfo_flags = event->flags;
- /* sinfo_tsn: 32 bit (unsigned integer)
- *
- * For the receiving side, this field holds a TSN that was
- * assigned to one of the SCTP Data Chunks.
- */
sinfo.sinfo_tsn = event->tsn;
- /* sinfo_cumtsn: 32 bit (unsigned integer)
- *
- * This field will hold the current cumulative TSN as
- * known by the underlying SCTP layer. Note this field is
- * ignored when sending and only valid for a receive
- * operation when sinfo_flags are set to SCTP_UNORDERED.
- */
sinfo.sinfo_cumtsn = event->cumtsn;
- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association handle field, sinfo_assoc_id, holds the identifier
- * for the association announced in the COMMUNICATION_UP notification.
- * All notifications for a given association have the same identifier.
- * Ignored for one-to-one style sockets.
- */
sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
- /* context value that is set via SCTP_CONTEXT socket option. */
+ /* Context value that is set via SCTP_CONTEXT socket option. */
sinfo.sinfo_context = event->asoc->default_rcv_context;
-
/* These fields are not used while receiving. */
sinfo.sinfo_timetolive = 0;
put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+ sizeof(sinfo), &sinfo);
}
/* Do accounting for bytes received and hold a reference to the association
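The hunks above replace the long per-field quotes from the Sockets API draft with a single RFC6458 reference and, more importantly, zero each notification before filling it so structure padding never reaches userspace. A minimal standalone sketch of that zero-then-fill pattern; the struct and helper below are illustrative, not part of the patch:

    #include <stdint.h>
    #include <string.h>

    struct example_notification {            /* illustrative layout only */
            uint16_t type;
            uint16_t flags;
            uint32_t length;                 /* total length incl. header */
    };

    static void fill_notification(struct example_notification *n, uint32_t len)
    {
            memset(n, 0, sizeof(*n));        /* clear padding before export */
            n->type = 1;
            n->flags = 0;
            n->length = len;
    }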
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2663167..55c6c9d 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@
buf = node->bclink.deferred_head;
node->bclink.deferred_head = buf->next;
+ buf->next = NULL;
node->bclink.deferred_size--;
goto receive;
}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94..0a37a47 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@
}
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
+ * out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf: in: the buffer to append. Always defined
+ * out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
*/
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
@@ -122,6 +124,7 @@
goto out_free;
head = *headbuf = frag;
skb_frag_list_init(head);
+ *buf = NULL;
return 0;
}
if (!head)
@@ -150,5 +153,7 @@
out_free:
pr_warn_ratelimited("Unable to build fragment list\n");
kfree_skb(*buf);
+ kfree_skb(*headbuf);
+ *buf = *headbuf = NULL;
return 0;
}
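The rewritten kernel-doc above pins down the in/out contract of tipc_buf_append(), and the added kfree_skb() calls make the failure path honour it. A hedged sketch of a caller that follows that contract; next_fragment() and deliver() are hypothetical stand-ins:

    struct sk_buff *head = NULL;
    struct sk_buff *frag;

    while ((frag = next_fragment()) != NULL) {
            if (tipc_buf_append(&head, &frag)) {
                    deliver(frag);  /* frag now holds the complete message */
                    head = NULL;
                    break;
            }
            /* return value 0: either more fragments are expected
             * (head != NULL) or the chain was dropped (head == NULL) */
            if (!head)
                    break;
    }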
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf1..7e3a3ce 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@
if (end >= start)
return jiffies_to_msecs(end - start);
- return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+ return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
}
void
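This one-liner matters because an unsigned jiffies counter wraps at ULONG_MAX, not at MAX_JIFFY_OFFSET, so the old expression produced a wrong elapsed time after a wrap. A standalone illustration of the corrected arithmetic:

    #include <limits.h>
    #include <stdio.h>

    static unsigned long elapsed(unsigned long start, unsigned long end)
    {
            if (end >= start)
                    return end - start;
            return end + (ULONG_MAX - start) + 1;   /* counter wrapped */
    }

    int main(void)
    {
            printf("%lu\n", elapsed(ULONG_MAX - 5, 10));    /* prints 16 */
            return 0;
    }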
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f172..6668daf 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@
}
CMD(start_p2p_device, START_P2P_DEVICE);
CMD(set_mcast_rate, SET_MCAST_RATE);
+#ifdef CONFIG_NL80211_TESTMODE
+ CMD(testmode_cmd, TESTMODE);
+#endif
if (state->split) {
CMD(crit_proto_start, CRIT_PROTOCOL_START);
CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
CMD(channel_switch, CHANNEL_SWITCH);
+ CMD(set_qos_map, SET_QOS_MAP);
}
- CMD(set_qos_map, SET_QOS_MAP);
-
-#ifdef CONFIG_NL80211_TESTMODE
- CMD(testmode_cmd, TESTMODE);
-#endif
-
+ /* add new commands inside the if (state->split) block above */
#undef CMD
if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3..1afdf45 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@
if (!band_rule_found)
band_rule_found = freq_in_rule_band(fr, center_freq);
- bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
+ bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
if (band_rule_found && bw_fits)
return rr;
@@ -1019,10 +1019,10 @@
}
#endif
-/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
- * chan->center_freq fits there.
- * If there is no such reg_rule, disable the channel, otherwise set the
- * flags corresponding to the bandwidths allowed in the particular reg_rule
+/*
+ * Note that right now we assume the desired channel bandwidth
+ * is always 20 MHz for each individual channel (HT40 uses 20 MHz
+ * per channel, the primary and the extension channel).
*/
static void handle_channel(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags = IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
+ bw_flags = IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags = IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
+ bw_flags = IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
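After the revert above only the HT40/80/160 restrictions are derived from a rule's maximum bandwidth: a rule capped at 40 MHz yields NO_80MHZ | NO_160MHZ, and one capped at 20 MHz additionally gets NO_HT40. A standalone sketch of that derivation; the bit values are illustrative, not the kernel's:

    #include <stdio.h>

    #define NO_HT40   (1 << 0)
    #define NO_80MHZ  (1 << 1)
    #define NO_160MHZ (1 << 2)

    static unsigned int bw_flags(unsigned int max_bw_khz)
    {
            unsigned int flags = 0;

            if (max_bw_khz < 40000)
                    flags = NO_HT40;
            if (max_bw_khz < 80000)
                    flags |= NO_80MHZ;
            if (max_bw_khz < 160000)
                    flags |= NO_160MHZ;
            return flags;
    }

    int main(void)
    {
            printf("%#x %#x\n", bw_flags(40000), bw_flags(20000)); /* 0x6 0x7 */
            return 0;
    }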
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb..70faa3a 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@
struct special_params *params = bebob->maudio_special_quirk;
int err, id;
- mutex_lock(&bebob->mutex);
-
id = uval->value.enumerated.item[0];
if (id >= ARRAY_SIZE(special_clk_labels))
- return 0;
+ return -EINVAL;
+
+ mutex_lock(&bebob->mutex);
err = avc_maudio_set_special_clk(bebob, id,
params->dig_in_fmt,
@@ -391,7 +391,10 @@
params->clk_lock);
mutex_unlock(&bebob->mutex);
- return err >= 0;
+ if (err >= 0)
+ err = 1;
+
+ return err;
}
static struct snd_kcontrol_new special_clk_ctl = {
.name = "Clock Source",
@@ -434,8 +437,8 @@
.get = special_sync_ctl_get,
};
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
"S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
};
static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@
{
einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+ einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
if (einf->value.enumerated.item >= einf->value.enumerated.items)
einf->value.enumerated.item = einf->value.enumerated.items - 1;
strcpy(einf->value.enumerated.name,
- special_dig_iface_labels[einf->value.enumerated.item]);
+ special_dig_in_iface_labels[einf->value.enumerated.item]);
return 0;
}
@@ -491,26 +494,36 @@
unsigned int id, dig_in_fmt, dig_in_iface;
int err;
- mutex_lock(&bebob->mutex);
-
id = uval->value.enumerated.item[0];
+ if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+ return -EINVAL;
/* decode user value */
dig_in_fmt = (id >> 1) & 0x01;
dig_in_iface = id & 0x01;
+ mutex_lock(&bebob->mutex);
+
err = avc_maudio_set_special_clk(bebob,
params->clk_src,
dig_in_fmt,
params->dig_out_fmt,
params->clk_lock);
- if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+ if (err < 0)
goto end;
+ /* For ADAT, only the optical interface is available. */
+ if (params->dig_in_fmt > 0) {
+ err = 1;
+ goto end;
+ }
+
+ /* For S/PDIF, optical/coaxial interfaces are selectable. */
err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
if (err < 0)
dev_err(&bebob->unit->device,
"fail to set digital input interface: %d\n", err);
+ err = 1;
end:
special_stream_formation_set(bebob);
mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@
.put = special_dig_in_iface_ctl_set
};
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+ "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *einf)
{
einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+ einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
if (einf->value.enumerated.item >= einf->value.enumerated.items)
einf->value.enumerated.item = einf->value.enumerated.items - 1;
strcpy(einf->value.enumerated.name,
- special_dig_iface_labels[einf->value.enumerated.item + 1]);
+ special_dig_out_iface_labels[einf->value.enumerated.item]);
return 0;
}
@@ -558,16 +575,20 @@
unsigned int id;
int err;
- mutex_lock(&bebob->mutex);
-
id = uval->value.enumerated.item[0];
+ if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+ return -EINVAL;
+
+ mutex_lock(&bebob->mutex);
err = avc_maudio_set_special_clk(bebob,
params->clk_src,
params->dig_in_fmt,
id, params->clk_lock);
- if (err >= 0)
+ if (err >= 0) {
special_stream_formation_set(bebob);
+ err = 1;
+ }
mutex_unlock(&bebob->mutex);
return err;
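The bebob changes above converge on the standard ALSA .put convention: validate the user value and return a negative errno before taking the mutex, then return 1 when the control value actually changed. A hedged sketch of that shape; NUM_ITEMS and apply_value() are hypothetical:

    static int example_ctl_put(struct snd_kcontrol *kctl,
                               struct snd_ctl_elem_value *uval)
    {
            unsigned int id = uval->value.enumerated.item[0];

            if (id >= NUM_ITEMS)            /* validate before locking */
                    return -EINVAL;

            if (apply_value(kctl, id) < 0)
                    return -EIO;

            return 1;                       /* value changed */
    }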
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 480bbdd..6df04d9 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -193,7 +193,8 @@
dsp_unlock(azx_dev);
return azx_dev;
}
- if (!res)
+ if (!res ||
+ (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
res = azx_dev;
}
dsp_unlock(azx_dev);
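The extra condition changes which free stream wins: without AZX_DCAPS_REVERSE_ASSIGN the first free device is kept, with it the candidate keeps being overwritten so the last (highest-numbered) free device is chosen. A standalone illustration of that selection rule:

    #include <stdbool.h>
    #include <stdio.h>

    static int pick(const bool free_slot[], int n, bool reverse)
    {
            int res = -1;
            int i;

            for (i = 0; i < n; i++) {
                    if (!free_slot[i])
                            continue;
                    if (res < 0 || reverse)
                            res = i;        /* reverse: keep the last free slot */
            }
            return res;
    }

    int main(void)
    {
            bool slots[] = { false, true, true, true };

            printf("%d %d\n", pick(slots, 4, false), pick(slots, 4, true)); /* 1 3 */
            return 0;
    }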
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b6b4e71..83cd190 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -227,7 +227,7 @@
/* quirks for Intel PCH */
#define AZX_DCAPS_INTEL_PCH_NOPM \
(AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
- AZX_DCAPS_COUNT_LPIB_DELAY)
+ AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
#define AZX_DCAPS_INTEL_PCH \
(AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -596,7 +596,7 @@
struct azx *chip = card->private_data;
struct azx_pcm *p;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -628,7 +628,7 @@
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -665,7 +665,7 @@
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -692,7 +692,7 @@
struct hda_codec *codec;
int status;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -729,7 +729,7 @@
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (!power_save_controller ||
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 4a7cb01..e9d1a57 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -186,6 +186,7 @@
#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index a366ba9..358414d 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -236,6 +236,7 @@
return rc;
}
+#ifdef CONFIG_PM_SLEEP
static void hda_tegra_disable_clocks(struct hda_tegra *data)
{
clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@
clk_disable_unprepare(data->hda_clk);
}
-#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4fe876b..ba4ca52 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3337,6 +3337,7 @@
{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@
MODULE_ALIAS("snd-hda-codec-id:10de0051");
MODULE_ALIAS("snd-hda-codec-id:10de0060");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0070");
MODULE_ALIAS("snd-hda-codec-id:10de0071");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f70..ee53a42 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@
static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_mutex_lock(&lock->mutex);
}
@@ -47,7 +47,7 @@
static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
}
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8..4ec03f8 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@
static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_rdlock(&lock->rwlock);
}
@@ -49,19 +49,19 @@
static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_wrlock(&lock->rwlock);
}
static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
}
static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
}
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69c..6f80360 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@
static void init_preload(void);
static void try_init_preload(void)
{
- if (!__init_state != done)
+ if (__init_state != done)
init_preload();
}
@@ -252,7 +252,7 @@
try_init_preload();
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
(unsigned long)_RET_IP_);
/*
* Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@
try_init_preload();
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_mutex_trylock(mutex);
if (r)
lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@
*/
r = ll_pthread_mutex_unlock(mutex);
if (r)
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return r;
}
@@ -352,7 +352,7 @@
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_rdlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_tryrdlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_trywrlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_wrlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_unlock(rwlock);
if (r)
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return r;
}
@@ -439,8 +439,6 @@
ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif
- printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
lockdep_init();
__init_state = done;
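The try_init_preload() fix is a precedence bug: '!' binds tighter than '!=', so the old test compared (!__init_state), which is always 0 or 1, against 'done' instead of comparing the state itself. A standalone demonstration; the enum values here are illustrative:

    #include <stdio.h>

    enum { uninitialized, initializing, done };

    int main(void)
    {
            int state = done;

            printf("%d\n", !state != done); /* 1: (!done) == 0, and 0 != 2 */
            printf("%d\n", state != done);  /* 0: already initialized */
            return 0;
    }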
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 52c03fb..04a229a 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
#include "../util.h"
#include "../ui.h"
#include "map.h"
+#include "annotate.h"
struct hist_browser {
struct ui_browser b;
@@ -1593,13 +1594,18 @@
bi->to.sym->name) > 0)
annotate_t = nr_options++;
} else {
-
if (browser->selection != NULL &&
browser->selection->sym != NULL &&
- !browser->selection->map->dso->annotate_warned &&
- asprintf(&options[nr_options], "Annotate %s",
- browser->selection->sym->name) > 0)
- annotate = nr_options++;
+ !browser->selection->map->dso->annotate_warned) {
+ struct annotation *notes;
+
+ notes = symbol__annotation(browser->selection->sym);
+
+ if (notes->src &&
+ asprintf(&options[nr_options], "Annotate %s",
+ browser->selection->sym->name) > 0)
+ annotate = nr_options++;
+ }
}
if (thread != NULL &&
@@ -1656,6 +1662,7 @@
if (choice == annotate || choice == annotate_t || choice == annotate_f) {
struct hist_entry *he;
+ struct annotation *notes;
int err;
do_annotate:
if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@
he->ms.map = he->branch_info->to.map;
}
+ notes = symbol__annotation(he->ms.sym);
+ if (!notes->src)
+ continue;
+
/*
* Don't let this be freed, say, by hists__decay_entry.
*/
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea9..c73e1fc 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@
u64 start;
};
-static int symbol__in_kernel(void *arg, const char *name,
- char type __maybe_unused, u64 start)
-{
- struct process_args *args = arg;
-
- if (strchr(name, '['))
- return 0;
-
- args->start = start;
- return 1;
-}
-
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
size_t bufsz)
{
@@ -517,27 +505,41 @@
scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of the kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass NULL as
+ * symbol_name if the caller does not need it.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+ const char **symbol_name)
{
char filename[PATH_MAX];
- struct process_args args;
+ int i;
+ const char *name;
+ u64 addr = 0;
machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
- if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
- return 0;
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr = kallsyms__get_function_start(filename, name);
+ if (addr)
+ break;
+ }
- return args.start;
+ if (symbol_name)
+ *symbol_name = name;
+
+ return addr;
}
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
- u64 start = machine__get_kernel_start_addr(machine);
+ u64 start = machine__get_kernel_start_addr(machine, NULL);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
@@ -852,23 +854,11 @@
return 0;
}
-const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
-
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
- char filename[PATH_MAX];
const char *name;
- u64 addr = 0;
- int i;
-
- machine__get_kallsyms_filename(machine, filename, PATH_MAX);
-
- for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
- addr = kallsyms__get_function_start(filename, name);
- if (addr)
- break;
- }
+ u64 addr = machine__get_kernel_start_addr(machine, &name);
if (!addr)
return -1;
diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile
index e5a3c4b..3d1537b 100644
--- a/tools/power/acpi/Makefile
+++ b/tools/power/acpi/Makefile
@@ -108,13 +108,18 @@
apmain.o\
osunixdir.o\
osunixmap.o\
+ osunixxf.o\
tbprint.o\
tbxfroot.o\
utbuffer.o\
+ utdebug.o\
utexcep.o\
+ utglobal.o\
utmath.o\
+ utprint.o\
utstring.o\
utxferror.o\
+ oslibcfs.o\
oslinuxtbl.o\
cmfsize.o\
getopt.o
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index 5140e5e..f4b9533 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -58,44 +58,46 @@
* RETURN: File Size. On error, -1 (ACPI_UINT32_MAX)
*
* DESCRIPTION: Get the size of a file. Uses seek-to-EOF. File must be open.
- * Does not disturb the current file pointer. Uses perror for
- * error messages.
+ * Does not disturb the current file pointer.
*
******************************************************************************/
-u32 cm_get_file_size(FILE * file)
+u32 cm_get_file_size(ACPI_FILE file)
{
long file_size;
long current_offset;
+ acpi_status status;
/* Save the current file pointer, seek to EOF to obtain file size */
- current_offset = ftell(file);
+ current_offset = acpi_os_get_file_offset(file);
if (current_offset < 0) {
goto offset_error;
}
- if (fseek(file, 0, SEEK_END)) {
+ status = acpi_os_set_file_offset(file, 0, ACPI_FILE_END);
+ if (ACPI_FAILURE(status)) {
goto seek_error;
}
- file_size = ftell(file);
+ file_size = acpi_os_get_file_offset(file);
if (file_size < 0) {
goto offset_error;
}
/* Restore original file pointer */
- if (fseek(file, current_offset, SEEK_SET)) {
+ status = acpi_os_set_file_offset(file, current_offset, ACPI_FILE_BEGIN);
+ if (ACPI_FAILURE(status)) {
goto seek_error;
}
return ((u32)file_size);
offset_error:
- perror("Could not get file offset");
+ acpi_log_error("Could not get file offset");
return (ACPI_UINT32_MAX);
seek_error:
- perror("Could not seek file");
+ acpi_log_error("Could not set file offset");
return (ACPI_UINT32_MAX);
}
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index a302f52..2f0f34a 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -51,14 +51,12 @@
* "f|" - Option has required single-char sub-options
*/
-#include <stdio.h>
-#include <string.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acapps.h"
#define ACPI_OPTION_ERROR(msg, badchar) \
- if (acpi_gbl_opterr) {fprintf (stderr, "%s%c\n", msg, badchar);}
+ if (acpi_gbl_opterr) {acpi_log_error ("%s%c\n", msg, badchar);}
int acpi_gbl_opterr = 1;
int acpi_gbl_optind = 1;
@@ -113,7 +111,7 @@
* PARAMETERS: argc, argv - from main
* opts - options info list
*
- * RETURN: Option character or EOF
+ * RETURN: Option character or ACPI_OPT_END
*
* DESCRIPTION: Get the next option
*
@@ -128,10 +126,10 @@
if (acpi_gbl_optind >= argc ||
argv[acpi_gbl_optind][0] != '-' ||
argv[acpi_gbl_optind][1] == '\0') {
- return (EOF);
- } else if (strcmp(argv[acpi_gbl_optind], "--") == 0) {
+ return (ACPI_OPT_END);
+ } else if (ACPI_STRCMP(argv[acpi_gbl_optind], "--") == 0) {
acpi_gbl_optind++;
- return (EOF);
+ return (ACPI_OPT_END);
}
}
@@ -142,7 +140,7 @@
/* Make sure that the option is legal */
if (current_char == ':' ||
- (opts_ptr = strchr(opts, current_char)) == NULL) {
+ (opts_ptr = ACPI_STRCHR(opts, current_char)) == NULL) {
ACPI_OPTION_ERROR("Illegal option: -", current_char);
if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') {
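With the getopt.c changes above, callers of the option scanner loop until ACPI_OPT_END instead of EOF. A hedged usage sketch shaped like the acpidump main loop; the option string and usage() helper are illustrative:

    int j;

    while ((j = acpi_getopt(argc, argv, "o:hv")) != ACPI_OPT_END) {
            switch (j) {
            case 'o':
                    /* acpi_gbl_optarg points at the required argument */
                    break;
            case 'v':
                    break;
            case 'h':
            default:
                    usage();
                    return (0);
            }
    }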
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
new file mode 100644
index 0000000..c13ff9c
--- /dev/null
+++ b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * Module Name: oslibcfs - C library OSL for file I/O
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#define _COMPONENT ACPI_OS_SERVICES
+ACPI_MODULE_NAME("oslibcfs")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_open_file
+ *
+ * PARAMETERS: path - File path
+ * modes - File operation type
+ *
+ * RETURN: File descriptor.
+ *
+ * DESCRIPTION: Open a file for reading (ACPI_FILE_READING) and/or writing
+ * (ACPI_FILE_WRITING).
+ *
+ ******************************************************************************/
+ACPI_FILE acpi_os_open_file(const char *path, u8 modes)
+{
+ ACPI_FILE file;
+ u32 i = 0;
+ char modes_str[4];
+
+ if (modes & ACPI_FILE_READING) {
+ modes_str[i++] = 'r';
+ }
+ if (modes & ACPI_FILE_WRITING) {
+ modes_str[i++] = 'w';
+ }
+ if (modes & ACPI_FILE_BINARY) {
+ modes_str[i++] = 'b';
+ }
+
+ modes_str[i++] = '\0';
+
+ file = fopen(path, modes_str);
+ if (!file) {
+ perror("Could not open file");
+ }
+
+ return (file);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_close_file
+ *
+ * PARAMETERS: file - An open file descriptor
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Close a file opened via acpi_os_open_file.
+ *
+ ******************************************************************************/
+
+void acpi_os_close_file(ACPI_FILE file)
+{
+ fclose(file);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_read_file
+ *
+ * PARAMETERS: file - An open file descriptor
+ * buffer - Data buffer
+ * size - Data block size
+ * count - Number of data blocks
+ *
+ * RETURN: Number of bytes actually read.
+ *
+ * DESCRIPTION: Read from a file.
+ *
+ ******************************************************************************/
+
+int
+acpi_os_read_file(ACPI_FILE file, void *buffer, acpi_size size, acpi_size count)
+{
+ int length;
+
+ length = fread(buffer, size, count, file);
+ if (length < 0) {
+ perror("Error reading file");
+ }
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_write_file
+ *
+ * PARAMETERS: file - An open file descriptor
+ * buffer - Data buffer
+ * size - Data block size
+ * count - Number of data blocks
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Write to a file.
+ *
+ ******************************************************************************/
+
+int
+acpi_os_write_file(ACPI_FILE file,
+ void *buffer, acpi_size size, acpi_size count)
+{
+ int length;
+
+ length = fwrite(buffer, size, count, file);
+ if (length < 0) {
+ perror("Error writing file");
+ }
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_get_file_offset
+ *
+ * PARAMETERS: file - An open file descriptor
+ *
+ * RETURN: Current file pointer position.
+ *
+ * DESCRIPTION: Get current file offset.
+ *
+ ******************************************************************************/
+
+long acpi_os_get_file_offset(ACPI_FILE file)
+{
+ long offset;
+
+ offset = ftell(file);
+ return (offset);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_set_file_offset
+ *
+ * PARAMETERS: file - An open file descriptor
+ * offset - New file offset
+ * from - From begin/end of file
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Set current file offset.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from)
+{
+ int ret = 0;
+
+ if (from == ACPI_FILE_BEGIN) {
+ ret = fseek(file, offset, SEEK_SET);
+ }
+ if (from == ACPI_FILE_END) {
+ ret = fseek(file, offset, SEEK_END);
+ }
+
+ if (ret < 0) {
+ return (AE_ERROR);
+ } else {
+ return (AE_OK);
+ }
+}
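A hedged usage sketch tying the new OSL file layer to cm_get_file_size() above; the path comes from the caller and error reporting is abbreviated:

    static acpi_status print_file_size(const char *path)
    {
            ACPI_FILE file;
            u32 size;

            file = acpi_os_open_file(path, ACPI_FILE_READING | ACPI_FILE_BINARY);
            if (!file) {
                    return (AE_ERROR);
            }

            size = cm_get_file_size(file);
            acpi_os_close_file(file);

            if (size == ACPI_UINT32_MAX) {
                    return (AE_ERROR);
            }

            acpi_os_printf("%s: %u bytes\n", path, size);
            return (AE_OK);
    }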
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 28c5200..0dc2485 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -77,6 +77,9 @@
static void osl_unmap_table(struct acpi_table_header *table);
+static acpi_physical_address
+osl_find_rsdp_via_efi_by_keyword(FILE * file, const char *keyword);
+
static acpi_physical_address osl_find_rsdp_via_efi(void);
static acpi_status osl_load_rsdp(void);
@@ -417,6 +420,38 @@
/******************************************************************************
*
+ * FUNCTION: osl_find_rsdp_via_efi_by_keyword
+ *
+ * PARAMETERS: keyword - Character string indicating ACPI GUID version
+ * in the EFI table
+ *
+ * RETURN: RSDP address if found
+ *
+ * DESCRIPTION: Find RSDP address via EFI using keyword indicating the ACPI
+ * GUID version.
+ *
+ *****************************************************************************/
+
+static acpi_physical_address
+osl_find_rsdp_via_efi_by_keyword(FILE * file, const char *keyword)
+{
+ char buffer[80];
+ unsigned long long address = 0;
+ char format[32];
+
+ snprintf(format, 32, "%s=%s", keyword, "%llx");
+ fseek(file, 0, SEEK_SET);
+ while (fgets(buffer, 80, file)) {
+ if (sscanf(buffer, format, &address) == 1) {
+ break;
+ }
+ }
+
+ return ((acpi_physical_address) (address));
+}
+
+/******************************************************************************
+ *
* FUNCTION: osl_find_rsdp_via_efi
*
* PARAMETERS: None
@@ -430,20 +465,19 @@
static acpi_physical_address osl_find_rsdp_via_efi(void)
{
FILE *file;
- char buffer[80];
- unsigned long address = 0;
+ acpi_physical_address address = 0;
file = fopen(EFI_SYSTAB, "r");
if (file) {
- while (fgets(buffer, 80, file)) {
- if (sscanf(buffer, "ACPI20=0x%lx", &address) == 1) {
- break;
- }
+ address = osl_find_rsdp_via_efi_by_keyword(file, "ACPI20");
+ if (!address) {
+ address =
+ osl_find_rsdp_via_efi_by_keyword(file, "ACPI");
}
fclose(file);
}
- return ((acpi_physical_address) (address));
+ return (address);
}
/******************************************************************************
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
new file mode 100644
index 0000000..60b58cd
--- /dev/null
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -0,0 +1,1311 @@
+/******************************************************************************
+ *
+ * Module Name: osunixxf - UNIX OSL interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * These interfaces are required in order to compile the ASL compiler and the
+ * various ACPICA tools under Linux or other Unix-like system.
+ */
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acparser.h"
+#include "acdebug.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <errno.h>
+
+#define _COMPONENT ACPI_OS_SERVICES
+ACPI_MODULE_NAME("osunixxf")
+
+u8 acpi_gbl_debug_timeout = FALSE;
+
+/* Upcalls to acpi_exec */
+
+void
+ae_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table);
+
+typedef void *(*PTHREAD_CALLBACK) (void *);
+
+/* Buffer used by acpi_os_vprintf */
+
+#define ACPI_VPRINTF_BUFFER_SIZE 512
+#define _ASCII_NEWLINE '\n'
+
+/* Terminal support for acpi_exec only */
+
+#ifdef ACPI_EXEC_APP
+#include <termios.h>
+
+struct termios original_term_attributes;
+int term_attributes_were_set = 0;
+
+acpi_status acpi_ut_read_line(char *buffer, u32 buffer_length, u32 *bytes_read);
+
+static void os_enter_line_edit_mode(void);
+
+static void os_exit_line_edit_mode(void);
+
+/******************************************************************************
+ *
+ * FUNCTION: os_enter_line_edit_mode, os_exit_line_edit_mode
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Enter/Exit the raw character input mode for the terminal.
+ *
+ * Interactive line-editing support for the AML debugger. Used with the
+ * common/acgetline module.
+ *
+ * readline() is not used because of non-portability. It is not available
+ * on all systems, and if it is, often the package must be manually installed.
+ *
+ * Therefore, we use the POSIX tcgetattr/tcsetattr and do the minimal line
+ * editing that we need in acpi_os_get_line.
+ *
+ * If the POSIX tcgetattr/tcsetattr interfaces are unavailable, these
+ * calls will also work:
+ * For os_enter_line_edit_mode: system ("stty cbreak -echo")
+ * For os_exit_line_edit_mode: system ("stty cooked echo")
+ *
+ *****************************************************************************/
+
+static void os_enter_line_edit_mode(void)
+{
+ struct termios local_term_attributes;
+
+ /* Get and keep the original attributes */
+
+ if (tcgetattr(STDIN_FILENO, &original_term_attributes)) {
+ fprintf(stderr, "Could not get terminal attributes!\n");
+ return;
+ }
+
+ /* Set the new attributes to enable raw character input */
+
+ memcpy(&local_term_attributes, &original_term_attributes,
+ sizeof(struct termios));
+
+ local_term_attributes.c_lflag &= ~(ICANON | ECHO);
+ local_term_attributes.c_cc[VMIN] = 1;
+ local_term_attributes.c_cc[VTIME] = 0;
+
+ if (tcsetattr(STDIN_FILENO, TCSANOW, &local_term_attributes)) {
+ fprintf(stderr, "Could not set terminal attributes!\n");
+ return;
+ }
+
+ term_attributes_were_set = 1;
+}
+
+static void os_exit_line_edit_mode(void)
+{
+
+ if (!term_attributes_were_set) {
+ return;
+ }
+
+ /* Set terminal attributes back to the original values */
+
+ if (tcsetattr(STDIN_FILENO, TCSANOW, &original_term_attributes)) {
+ fprintf(stderr, "Could not restore terminal attributes!\n");
+ }
+}
+
+#else
+
+/* These functions are not needed for other ACPICA utilities */
+
+#define os_enter_line_edit_mode()
+#define os_exit_line_edit_mode()
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_initialize, acpi_os_terminate
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize and terminate this module.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_initialize(void)
+{
+ acpi_status status;
+
+ acpi_gbl_output_file = stdout;
+
+ os_enter_line_edit_mode();
+
+ status = acpi_os_create_lock(&acpi_gbl_print_lock);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ return (AE_OK);
+}
+
+acpi_status acpi_os_terminate(void)
+{
+
+ os_exit_line_edit_mode();
+ return (AE_OK);
+}
+
+#ifndef ACPI_USE_NATIVE_RSDP_POINTER
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_get_root_pointer
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: RSDP physical address
+ *
+ * DESCRIPTION: Gets the ACPI root pointer (RSDP)
+ *
+ *****************************************************************************/
+
+acpi_physical_address acpi_os_get_root_pointer(void)
+{
+
+ return (0);
+}
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_predefined_override
+ *
+ * PARAMETERS: init_val - Initial value of the predefined object
+ * new_val - The new value for the object
+ *
+ * RETURN: Status, pointer to value. Null pointer returned if not
+ * overriding.
+ *
+ * DESCRIPTION: Allow the OS to override predefined names
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_predefined_override(const struct acpi_predefined_names * init_val,
+ acpi_string * new_val)
+{
+
+ if (!init_val || !new_val) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ *new_val = NULL;
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_table_override
+ *
+ * PARAMETERS: existing_table - Header of current table (probably
+ * firmware)
+ * new_table - Where an entire new table is returned.
+ *
+ * RETURN: Status, pointer to new table. Null pointer returned if no
+ * table is available to override
+ *
+ * DESCRIPTION: Return a different version of a table if one is available
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header * existing_table,
+ struct acpi_table_header ** new_table)
+{
+
+ if (!existing_table || !new_table) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ *new_table = NULL;
+
+#ifdef ACPI_EXEC_APP
+
+ ae_table_override(existing_table, new_table);
+ return (AE_OK);
+#else
+
+ return (AE_NO_ACPI_TABLES);
+#endif
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_physical_table_override
+ *
+ * PARAMETERS: existing_table - Header of current table (probably firmware)
+ * new_address - Where new table address is returned
+ * (Physical address)
+ * new_table_length - Where new table length is returned
+ *
+ * RETURN: Status, address/length of new table. Null pointer returned
+ * if no table is available to override.
+ *
+ * DESCRIPTION: Returns AE_SUPPORT, function not used in user space.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header * existing_table,
+ acpi_physical_address * new_address,
+ u32 *new_table_length)
+{
+
+ return (AE_SUPPORT);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_redirect_output
+ *
+ * PARAMETERS: destination - An open file handle/pointer
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Causes redirect of acpi_os_printf and acpi_os_vprintf
+ *
+ *****************************************************************************/
+
+void acpi_os_redirect_output(void *destination)
+{
+
+ acpi_gbl_output_file = destination;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_printf
+ *
+ * PARAMETERS: fmt, ... - Standard printf format
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Formatted output. Note: very similar to acpi_os_vprintf
+ * (performance), changes should be tracked in both functions.
+ *
+ *****************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *fmt, ...)
+{
+ va_list args;
+ u8 flags;
+
+ flags = acpi_gbl_db_output_flags;
+ if (flags & ACPI_DB_REDIRECTABLE_OUTPUT) {
+
+ /* Output is directable to either a file (if open) or the console */
+
+ if (acpi_gbl_debug_file) {
+
+ /* Output file is open, send the output there */
+
+ va_start(args, fmt);
+ vfprintf(acpi_gbl_debug_file, fmt, args);
+ va_end(args);
+ } else {
+ /* No redirection, send output to console (once only!) */
+
+ flags |= ACPI_DB_CONSOLE_OUTPUT;
+ }
+ }
+
+ if (flags & ACPI_DB_CONSOLE_OUTPUT) {
+ va_start(args, fmt);
+ vfprintf(acpi_gbl_output_file, fmt, args);
+ va_end(args);
+ }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_vprintf
+ *
+ * PARAMETERS: fmt - Standard printf format
+ * args - Argument list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Formatted output with argument list pointer. Note: very
+ * similar to acpi_os_printf, changes should be tracked in both
+ * functions.
+ *
+ *****************************************************************************/
+
+void acpi_os_vprintf(const char *fmt, va_list args)
+{
+ u8 flags;
+ char buffer[ACPI_VPRINTF_BUFFER_SIZE];
+
+ /*
+ * We build the output string in a local buffer because we may be
+ * outputting the buffer twice. Using vfprintf is problematic because
+ * some implementations modify the args pointer/structure during
+ * execution. Thus, we use the local buffer for portability.
+ *
+ * Note: Since this module is intended for use by the various ACPICA
+ * utilities/applications, we can safely declare the buffer on the stack.
+ * Also, this function is used for relatively small error messages only.
+ */
+ vsnprintf(buffer, ACPI_VPRINTF_BUFFER_SIZE, fmt, args);
+
+ flags = acpi_gbl_db_output_flags;
+ if (flags & ACPI_DB_REDIRECTABLE_OUTPUT) {
+
+ /* Output is directable to either a file (if open) or the console */
+
+ if (acpi_gbl_debug_file) {
+
+ /* Output file is open, send the output there */
+
+ fputs(buffer, acpi_gbl_debug_file);
+ } else {
+ /* No redirection, send output to console (once only!) */
+
+ flags |= ACPI_DB_CONSOLE_OUTPUT;
+ }
+ }
+
+ if (flags & ACPI_DB_CONSOLE_OUTPUT) {
+ fputs(buffer, acpi_gbl_output_file);
+ }
+}
+
+#ifndef ACPI_EXEC_APP
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_get_line
+ *
+ * PARAMETERS: buffer - Where to return the command line
+ * buffer_length - Maximum length of Buffer
+ * bytes_read - Where the actual byte count is returned
+ *
+ * RETURN: Status and actual bytes read
+ *
+ * DESCRIPTION: Get the next input line from the terminal. NOTE: For the
+ * acpi_exec utility, we use the acgetline module instead to
+ * provide line-editing and history support.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
+{
+ int input_char;
+ u32 end_of_line;
+
+ /* Standard acpi_os_get_line for all utilities except acpi_exec */
+
+ for (end_of_line = 0;; end_of_line++) {
+ if (end_of_line >= buffer_length) {
+ return (AE_BUFFER_OVERFLOW);
+ }
+
+ if ((input_char = getchar()) == EOF) {
+ return (AE_ERROR);
+ }
+
+ if (!input_char || input_char == _ASCII_NEWLINE) {
+ break;
+ }
+
+ buffer[end_of_line] = (char)input_char;
+ }
+
+ /* Null terminate the buffer */
+
+ buffer[end_of_line] = 0;
+
+ /* Return the number of bytes in the string */
+
+ if (bytes_read) {
+ *bytes_read = end_of_line;
+ }
+
+ return (AE_OK);
+}
+#endif
+
+#ifndef ACPI_USE_NATIVE_MEMORY_MAPPING
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_map_memory
+ *
+ * PARAMETERS: where - Physical address of memory to be mapped
+ * length - How much memory to map
+ *
+ * RETURN: Pointer to mapped memory. Null on error.
+ *
+ * DESCRIPTION: Map physical memory into caller's address space
+ *
+ *****************************************************************************/
+
+void *acpi_os_map_memory(acpi_physical_address where, acpi_size length)
+{
+
+ return (ACPI_TO_POINTER((acpi_size) where));
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_unmap_memory
+ *
+ * PARAMETERS: where - Logical address of memory to be unmapped
+ * length - How much memory to unmap
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Delete a previously created mapping. Where and Length must
+ * correspond to a previous mapping exactly.
+ *
+ *****************************************************************************/
+
+void acpi_os_unmap_memory(void *where, acpi_size length)
+{
+
+ return;
+}
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_allocate
+ *
+ * PARAMETERS: size - Amount to allocate, in bytes
+ *
+ * RETURN: Pointer to the new allocation. Null on error.
+ *
+ * DESCRIPTION: Allocate memory. Algorithm is dependent on the OS.
+ *
+ *****************************************************************************/
+
+void *acpi_os_allocate(acpi_size size)
+{
+ void *mem;
+
+ mem = (void *)malloc((size_t) size);
+ return (mem);
+}
+
+#ifdef USE_NATIVE_ALLOCATE_ZEROED
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_allocate_zeroed
+ *
+ * PARAMETERS: size - Amount to allocate, in bytes
+ *
+ * RETURN: Pointer to the new allocation. Null on error.
+ *
+ * DESCRIPTION: Allocate and zero memory. Algorithm is dependent on the OS.
+ *
+ *****************************************************************************/
+
+void *acpi_os_allocate_zeroed(acpi_size size)
+{
+ void *mem;
+
+ mem = (void *)calloc(1, (size_t) size);
+ return (mem);
+}
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_free
+ *
+ * PARAMETERS: mem - Pointer to previously allocated memory
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Free memory allocated via acpi_os_allocate
+ *
+ *****************************************************************************/
+
+void acpi_os_free(void *mem)
+{
+
+ free(mem);
+}
+
+#ifdef ACPI_SINGLE_THREADED
+/******************************************************************************
+ *
+ * FUNCTION: Semaphore stub functions
+ *
+ * DESCRIPTION: Stub functions used for single-thread applications that do
+ * not require semaphore synchronization. Full implementations
+ * of these functions appear after the stubs.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_create_semaphore(u32 max_units,
+ u32 initial_units, acpi_handle * out_handle)
+{
+ *out_handle = (acpi_handle) 1;
+ return (AE_OK);
+}
+
+acpi_status acpi_os_delete_semaphore(acpi_handle handle)
+{
+ return (AE_OK);
+}
+
+acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
+{
+ return (AE_OK);
+}
+
+acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
+{
+ return (AE_OK);
+}
+
+#else
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_create_semaphore
+ *
+ * PARAMETERS: initial_units - Units to be assigned to the new semaphore
+ * out_handle - Where a handle will be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create an OS semaphore
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_create_semaphore(u32 max_units,
+ u32 initial_units, acpi_handle * out_handle)
+{
+ sem_t *sem;
+
+ if (!out_handle) {
+ return (AE_BAD_PARAMETER);
+ }
+#ifdef __APPLE__
+ {
+ char *semaphore_name = tmpnam(NULL);
+
+ sem =
+ sem_open(semaphore_name, O_EXCL | O_CREAT, 0755,
+ initial_units);
+ if (!sem) {
+ return (AE_NO_MEMORY);
+ }
+ sem_unlink(semaphore_name); /* This just deletes the name */
+ }
+
+#else
+ sem = acpi_os_allocate(sizeof(sem_t));
+ if (!sem) {
+ return (AE_NO_MEMORY);
+ }
+
+ if (sem_init(sem, 0, initial_units) == -1) {
+ acpi_os_free(sem);
+ return (AE_BAD_PARAMETER);
+ }
+#endif
+
+ *out_handle = (acpi_handle) sem;
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_delete_semaphore
+ *
+ * PARAMETERS: handle - Handle returned by acpi_os_create_semaphore
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete an OS semaphore
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_delete_semaphore(acpi_handle handle)
+{
+ sem_t *sem = (sem_t *) handle;
+
+ if (!sem) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ if (sem_destroy(sem) == -1) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_wait_semaphore
+ *
+ * PARAMETERS: handle - Handle returned by acpi_os_create_semaphore
+ * units - How many units to wait for
+ * msec_timeout - How long to wait (milliseconds)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Wait for units
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 msec_timeout)
+{
+ acpi_status status = AE_OK;
+ sem_t *sem = (sem_t *) handle;
+#ifndef ACPI_USE_ALTERNATE_TIMEOUT
+ struct timespec time;
+ int ret_val;
+#endif
+
+ if (!sem) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ switch (msec_timeout) {
+ /*
+ * No Wait:
+ * --------
+ * A zero timeout value indicates that we shouldn't wait - just
+ * acquire the semaphore if available, otherwise return AE_TIME
+ * (a.k.a. 'would block').
+ */
+ case 0:
+
+ if (sem_trywait(sem) == -1) {
+ status = (AE_TIME);
+ }
+ break;
+
+ /* Wait Indefinitely */
+
+ case ACPI_WAIT_FOREVER:
+
+ if (sem_wait(sem)) {
+ status = (AE_TIME);
+ }
+ break;
+
+ /* Wait with msec_timeout */
+
+ default:
+
+#ifdef ACPI_USE_ALTERNATE_TIMEOUT
+ /*
+ * Alternate timeout mechanism for environments where
+ * sem_timedwait is not available or does not work properly.
+ */
+ while (msec_timeout) {
+ if (sem_trywait(sem) == 0) {
+
+ /* Got the semaphore */
+ return (AE_OK);
+ }
+
+ if (msec_timeout >= 10) {
+ msec_timeout -= 10;
+ usleep(10 * ACPI_USEC_PER_MSEC); /* ten milliseconds */
+ } else {
+ msec_timeout--;
+ usleep(ACPI_USEC_PER_MSEC); /* one millisecond */
+ }
+ }
+ status = (AE_TIME);
+#else
+ /*
+ * The interface to sem_timedwait is an absolute time, so we need to
+ * get the current time, then add in the millisecond Timeout value.
+ */
+ if (clock_gettime(CLOCK_REALTIME, &time) == -1) {
+ perror("clock_gettime");
+ return (AE_TIME);
+ }
+
+ time.tv_sec += (msec_timeout / ACPI_MSEC_PER_SEC);
+ time.tv_nsec +=
+ ((msec_timeout % ACPI_MSEC_PER_SEC) * ACPI_NSEC_PER_MSEC);
+
+ /* Handle nanosecond overflow (field must be less than one second) */
+
+ if (time.tv_nsec >= ACPI_NSEC_PER_SEC) {
+ time.tv_sec += (time.tv_nsec / ACPI_NSEC_PER_SEC);
+ time.tv_nsec = (time.tv_nsec % ACPI_NSEC_PER_SEC);
+ }
+
+ while (((ret_val = sem_timedwait(sem, &time)) == -1)
+ && (errno == EINTR)) {
+ continue;
+ }
+
+ if (ret_val != 0) {
+ if (errno != ETIMEDOUT) {
+ perror("sem_timedwait");
+ }
+ status = (AE_TIME);
+ }
+#endif
+ break;
+ }
+
+ return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_signal_semaphore
+ *
+ * PARAMETERS: handle - Handle returned by acpi_os_create_semaphore
+ * units - Number of units to send
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Send units
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
+{
+ sem_t *sem = (sem_t *) handle;
+
+ if (!sem) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ if (sem_post(sem) == -1) {
+ return (AE_LIMIT);
+ }
+
+ return (AE_OK);
+}
+
+#endif /* ACPI_SINGLE_THREADED */
+
+/******************************************************************************
+ *
+ * FUNCTION: Spinlock interfaces
+ *
+ * DESCRIPTION: Map these interfaces to semaphore interfaces
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_create_lock(acpi_spinlock * out_handle)
+{
+
+ return (acpi_os_create_semaphore(1, 1, out_handle));
+}
+
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+ acpi_os_delete_semaphore(handle);
+}
+
+acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)
+{
+ acpi_os_wait_semaphore(handle, 1, 0xFFFF);
+ return (0);
+}
+
+void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags)
+{
+ acpi_os_signal_semaphore(handle, 1);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_install_interrupt_handler
+ *
+ * PARAMETERS:  interrupt_number    - Interrupt level the handler should respond to.
+ *              service_routine     - Address of the ACPI interrupt handler
+ *              context             - Passed as a parameter to the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install an interrupt handler. Used to install the ACPI
+ * OS-independent handler.
+ *
+ *****************************************************************************/
+
+u32
+acpi_os_install_interrupt_handler(u32 interrupt_number,
+ acpi_osd_handler service_routine,
+ void *context)
+{
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_remove_interrupt_handler
+ *
+ * PARAMETERS:  interrupt_number    - Interrupt level the handler was installed for
+ *              service_routine     - Address of the handler being removed
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Uninstalls an interrupt handler.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_remove_interrupt_handler(u32 interrupt_number,
+ acpi_osd_handler service_routine)
+{
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_stall
+ *
+ * PARAMETERS: microseconds - Time to sleep
+ *
+ * RETURN: Blocks until sleep is completed.
+ *
+ * DESCRIPTION: Sleep at microsecond granularity
+ *
+ *****************************************************************************/
+
+void acpi_os_stall(u32 microseconds)
+{
+
+ if (microseconds) {
+ usleep(microseconds);
+ }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_sleep
+ *
+ * PARAMETERS: milliseconds - Time to sleep
+ *
+ * RETURN: Blocks until sleep is completed.
+ *
+ * DESCRIPTION: Sleep at millisecond granularity
+ *
+ *****************************************************************************/
+
+void acpi_os_sleep(u64 milliseconds)
+{
+
+ /* Sleep for whole seconds */
+
+ sleep(milliseconds / ACPI_MSEC_PER_SEC);
+
+ /*
+ * Sleep for remaining microseconds.
+ * Arg to usleep() is in usecs and must be less than 1,000,000 (1 second).
+ */
+ usleep((milliseconds % ACPI_MSEC_PER_SEC) * ACPI_USEC_PER_MSEC);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_get_timer
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Current time in 100 nanosecond units
+ *
+ * DESCRIPTION: Get the current system time
+ *
+ *****************************************************************************/
+
+u64 acpi_os_get_timer(void)
+{
+ struct timeval time;
+
+ /* This timer has sufficient resolution for user-space application code */
+
+ gettimeofday(&time, NULL);
+
+ /* (Seconds * 10^7 = 100ns(10^-7)) + (Microseconds(10^-6) * 10^1 = 100ns) */
+
+ return (((u64)time.tv_sec * ACPI_100NSEC_PER_SEC) +
+ ((u64)time.tv_usec * ACPI_100NSEC_PER_USEC));
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_read_pci_configuration
+ *
+ * PARAMETERS: pci_id - Seg/Bus/Dev
+ * pci_register - Device Register
+ * value - Buffer where value is placed
+ * width - Number of bits
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read data from PCI configuration space
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
+ u32 pci_register, u64 *value, u32 width)
+{
+
+ *value = 0;
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_write_pci_configuration
+ *
+ * PARAMETERS: pci_id - Seg/Bus/Dev
+ * pci_register - Device Register
+ * value - Value to be written
+ * width - Number of bits
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Write data to PCI configuration space
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id,
+ u32 pci_register, u64 value, u32 width)
+{
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_read_port
+ *
+ * PARAMETERS: address - Address of I/O port/register to read
+ * value - Where value is placed
+ * width - Number of bits
+ *
+ * RETURN:      Status. The value read is returned in *value
+ *
+ * DESCRIPTION: Read data from an I/O port or register
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width)
+{
+
+ switch (width) {
+ case 8:
+
+ *value = 0xFF;
+ break;
+
+ case 16:
+
+ *value = 0xFFFF;
+ break;
+
+ case 32:
+
+ *value = 0xFFFFFFFF;
+ break;
+
+ default:
+
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_write_port
+ *
+ * PARAMETERS: address - Address of I/O port/register to write
+ * value - Value to write
+ * width - Number of bits
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write data to an I/O port or register
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width)
+{
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_read_memory
+ *
+ * PARAMETERS: address - Physical Memory Address to read
+ * value - Where value is placed
+ * width - Number of bits (8,16,32, or 64)
+ *
+ * RETURN:      Status. The value read is returned in *value, always as
+ *              a 64-bit integer, regardless of the read width.
+ *
+ * DESCRIPTION: Read data from a physical memory address
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width)
+{
+
+ switch (width) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+
+ *value = 0;
+ break;
+
+ default:
+
+ return (AE_BAD_PARAMETER);
+ }
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_write_memory
+ *
+ * PARAMETERS: address - Physical Memory Address to write
+ * value - Value to write
+ * width - Number of bits (8,16,32, or 64)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write data to a physical memory address
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width)
+{
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_readable
+ *
+ * PARAMETERS: pointer - Area to be verified
+ * length - Size of area
+ *
+ * RETURN: TRUE if readable for entire length
+ *
+ * DESCRIPTION: Verify that a pointer is valid for reading
+ *
+ *****************************************************************************/
+
+u8 acpi_os_readable(void *pointer, acpi_size length)
+{
+
+ return (TRUE);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_writable
+ *
+ * PARAMETERS: pointer - Area to be verified
+ * length - Size of area
+ *
+ * RETURN: TRUE if writable for entire length
+ *
+ * DESCRIPTION: Verify that a pointer is valid for writing
+ *
+ *****************************************************************************/
+
+u8 acpi_os_writable(void *pointer, acpi_size length)
+{
+
+ return (TRUE);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_signal
+ *
+ * PARAMETERS:  function            - ACPICA signal function code
+ * info - Pointer to function-dependent structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Miscellaneous functions. Example implementation only.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_signal(u32 function, void *info)
+{
+
+ switch (function) {
+ case ACPI_SIGNAL_FATAL:
+
+ break;
+
+ case ACPI_SIGNAL_BREAKPOINT:
+
+ break;
+
+ default:
+
+ break;
+ }
+
+ return (AE_OK);
+}
+
+/* Optional multi-thread support */
+
+#ifndef ACPI_SINGLE_THREADED
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_get_thread_id
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Id of the running thread
+ *
+ * DESCRIPTION: Get the ID of the current (running) thread
+ *
+ *****************************************************************************/
+
+acpi_thread_id acpi_os_get_thread_id(void)
+{
+ pthread_t thread;
+
+ thread = pthread_self();
+ return (ACPI_CAST_PTHREAD_T(thread));
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_execute
+ *
+ * PARAMETERS: type - Type of execution
+ * function - Address of the function to execute
+ * context - Passed as a parameter to the function
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Execute a new thread
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
+{
+ pthread_t thread;
+ int ret;
+
+ ret =
+ pthread_create(&thread, NULL, (PTHREAD_CALLBACK) function, context);
+ if (ret) {
+ acpi_os_printf("Create thread failed");
+ }
+ return (0);
+}
+
+#else /* ACPI_SINGLE_THREADED */
+acpi_thread_id acpi_os_get_thread_id(void)
+{
+ return (1);
+}
+
+acpi_status
+acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
+{
+
+ function(context);
+
+ return (AE_OK);
+}
+
+#endif /* ACPI_SINGLE_THREADED */
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_wait_events_complete
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Wait for all asynchronous events to complete. This
+ * implementation does nothing.
+ *
+ *****************************************************************************/
+
+void acpi_os_wait_events_complete(void)
+{
+ return;
+}
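
The acpi_os_wait_semaphore() implementation above hinges on turning the relative
millisecond timeout into the absolute struct timespec that sem_timedwait() expects,
carrying nanosecond overflow into the seconds field and restarting the wait on EINTR.
A minimal standalone sketch of the same pattern (illustrative only, not part of this
patch; the helper name is made up):

#include <errno.h>
#include <semaphore.h>
#include <time.h>

/* Wait on a POSIX semaphore for a relative timeout in milliseconds.
 * Returns 0 on success, -1 on timeout or error (errno is set).
 */
static int sem_wait_msec(sem_t *sem, unsigned int msec)
{
	struct timespec ts;
	int ret;

	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
		return -1;

	/* Add the relative timeout; keep tv_nsec below one second */
	ts.tv_sec += msec / 1000;
	ts.tv_nsec += (long)(msec % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec += ts.tv_nsec / 1000000000L;
		ts.tv_nsec %= 1000000000L;
	}

	/* Restart the wait if it is interrupted by a signal */
	while ((ret = sem_timedwait(sem, &ts)) == -1 && errno == EINTR)
		continue;

	return ret;
}
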
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index 46f5195..a2d37d6 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -47,7 +47,6 @@
#ifdef _DECLARE_GLOBALS
#define EXTERN
#define INIT_GLOBAL(a,b) a=b
-#define DEFINE_ACPI_GLOBALS 1
#else
#define EXTERN extern
#define INIT_GLOBAL(a,b) a
@@ -69,7 +68,7 @@
EXTERN u8 INIT_GLOBAL(gbl_binary_mode, FALSE);
EXTERN u8 INIT_GLOBAL(gbl_dump_customized_tables, FALSE);
EXTERN u8 INIT_GLOBAL(gbl_do_not_dump_xsdt, FALSE);
-EXTERN FILE INIT_GLOBAL(*gbl_output_file, NULL);
+EXTERN ACPI_FILE INIT_GLOBAL(gbl_output_file, NULL);
EXTERN char INIT_GLOBAL(*gbl_output_filename, NULL);
EXTERN u64 INIT_GLOBAL(gbl_rsdp_base, 0);
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 3cac123..53cee78 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -69,17 +69,16 @@
/* Make sure signature is all ASCII and a valid ACPI name */
if (!acpi_ut_valid_acpi_name(table->signature)) {
- fprintf(stderr,
- "Table signature (0x%8.8X) is invalid\n",
- *(u32 *)table->signature);
+ acpi_log_error("Table signature (0x%8.8X) is invalid\n",
+ *(u32 *)table->signature);
return (FALSE);
}
/* Check for minimum table length */
if (table->length < sizeof(struct acpi_table_header)) {
- fprintf(stderr, "Table length (0x%8.8X) is invalid\n",
- table->length);
+ acpi_log_error("Table length (0x%8.8X) is invalid\n",
+ table->length);
return (FALSE);
}
}
@@ -116,8 +115,8 @@
}
if (ACPI_FAILURE(status)) {
- fprintf(stderr, "%4.4s: Warning: wrong checksum in table\n",
- table->signature);
+ acpi_log_error("%4.4s: Warning: wrong checksum in table\n",
+ table->signature);
}
return (AE_OK);
@@ -196,12 +195,13 @@
* Note: simplest to just always emit a 64-bit address. acpi_xtract
* utility can handle this.
*/
- printf("%4.4s @ 0x%8.8X%8.8X\n", table->signature,
- ACPI_FORMAT_UINT64(address));
+ acpi_ut_file_printf(gbl_output_file, "%4.4s @ 0x%8.8X%8.8X\n",
+ table->signature, ACPI_FORMAT_UINT64(address));
- acpi_ut_dump_buffer(ACPI_CAST_PTR(u8, table), table_length,
- DB_BYTE_DISPLAY, 0);
- printf("\n");
+ acpi_ut_dump_buffer_to_file(gbl_output_file,
+ ACPI_CAST_PTR(u8, table), table_length,
+ DB_BYTE_DISPLAY, 0);
+ acpi_ut_file_printf(gbl_output_file, "\n");
return (0);
}
@@ -239,20 +239,20 @@
if (status == AE_LIMIT) {
return (0);
} else if (i == 0) {
- fprintf(stderr,
- "Could not get ACPI tables, %s\n",
- acpi_format_exception(status));
+ acpi_log_error
+ ("Could not get ACPI tables, %s\n",
+ acpi_format_exception(status));
return (-1);
} else {
- fprintf(stderr,
- "Could not get ACPI table at index %u, %s\n",
- i, acpi_format_exception(status));
+ acpi_log_error
+ ("Could not get ACPI table at index %u, %s\n",
+ i, acpi_format_exception(status));
continue;
}
}
table_status = ap_dump_table_buffer(table, instance, address);
- free(table);
+ ACPI_FREE(table);
if (table_status) {
break;
@@ -288,22 +288,22 @@
status = acpi_ut_strtoul64(ascii_address, 0, &long_address);
if (ACPI_FAILURE(status)) {
- fprintf(stderr, "%s: Could not convert to a physical address\n",
- ascii_address);
+ acpi_log_error("%s: Could not convert to a physical address\n",
+ ascii_address);
return (-1);
}
address = (acpi_physical_address) long_address;
status = acpi_os_get_table_by_address(address, &table);
if (ACPI_FAILURE(status)) {
- fprintf(stderr, "Could not get table at 0x%8.8X%8.8X, %s\n",
- ACPI_FORMAT_UINT64(address),
- acpi_format_exception(status));
+ acpi_log_error("Could not get table at 0x%8.8X%8.8X, %s\n",
+ ACPI_FORMAT_UINT64(address),
+ acpi_format_exception(status));
return (-1);
}
table_status = ap_dump_table_buffer(table, 0, address);
- free(table);
+ ACPI_FREE(table);
return (table_status);
}
@@ -329,24 +329,24 @@
acpi_status status;
int table_status;
- if (strlen(signature) != ACPI_NAME_SIZE) {
- fprintf(stderr,
- "Invalid table signature [%s]: must be exactly 4 characters\n",
- signature);
+ if (ACPI_STRLEN(signature) != ACPI_NAME_SIZE) {
+ acpi_log_error
+ ("Invalid table signature [%s]: must be exactly 4 characters\n",
+ signature);
return (-1);
}
/* Table signatures are expected to be uppercase */
- strcpy(local_signature, signature);
+ ACPI_STRCPY(local_signature, signature);
acpi_ut_strupr(local_signature);
/* To be friendly, handle tables whose signatures do not match the name */
if (ACPI_COMPARE_NAME(local_signature, "FADT")) {
- strcpy(local_signature, ACPI_SIG_FADT);
+ ACPI_STRCPY(local_signature, ACPI_SIG_FADT);
} else if (ACPI_COMPARE_NAME(local_signature, "MADT")) {
- strcpy(local_signature, ACPI_SIG_MADT);
+ ACPI_STRCPY(local_signature, ACPI_SIG_MADT);
}
/* Dump all instances of this signature (to handle multiple SSDTs) */
@@ -362,14 +362,14 @@
return (0);
}
- fprintf(stderr,
- "Could not get ACPI table with signature [%s], %s\n",
- local_signature, acpi_format_exception(status));
+ acpi_log_error
+ ("Could not get ACPI table with signature [%s], %s\n",
+ local_signature, acpi_format_exception(status));
return (-1);
}
table_status = ap_dump_table_buffer(table, instance, address);
- free(table);
+ ACPI_FREE(table);
if (table_status) {
break;
@@ -409,43 +409,21 @@
/* File must be at least as long as the table length */
if (table->length > file_size) {
- fprintf(stderr,
- "Table length (0x%X) is too large for input file (0x%X) %s\n",
- table->length, file_size, pathname);
+ acpi_log_error
+ ("Table length (0x%X) is too large for input file (0x%X) %s\n",
+ table->length, file_size, pathname);
goto exit;
}
if (gbl_verbose_mode) {
- fprintf(stderr,
- "Input file: %s contains table [%4.4s], 0x%X (%u) bytes\n",
- pathname, table->signature, file_size, file_size);
+ acpi_log_error
+ ("Input file: %s contains table [%4.4s], 0x%X (%u) bytes\n",
+ pathname, table->signature, file_size, file_size);
}
table_status = ap_dump_table_buffer(table, 0, 0);
exit:
- free(table);
+ ACPI_FREE(table);
return (table_status);
}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_os* print functions
- *
- * DESCRIPTION: Used for linkage with ACPICA modules
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- vfprintf(stdout, fmt, args);
- va_end(args);
-}
-
-void acpi_os_vprintf(const char *fmt, va_list args)
-{
- vfprintf(stdout, fmt, args);
-}
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index 4488acc..d470046 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -44,6 +44,27 @@
#include "acpidump.h"
#include "acapps.h"
+/* Local prototypes */
+
+static int ap_is_existing_file(char *pathname);
+
+static int ap_is_existing_file(char *pathname)
+{
+#ifndef _GNU_EFI
+ struct stat stat_info;
+
+ if (!stat(pathname, &stat_info)) {
+ acpi_log_error("Target path already exists, overwrite? [y|n] ");
+
+ if (getchar() != 'y') {
+ return (-1);
+ }
+ }
+#endif
+
+ return 0;
+}
+
/******************************************************************************
*
* FUNCTION: ap_open_output_file
@@ -59,25 +80,19 @@
int ap_open_output_file(char *pathname)
{
- struct stat stat_info;
- FILE *file;
+ ACPI_FILE file;
/* If file exists, prompt for overwrite */
- if (!stat(pathname, &stat_info)) {
- fprintf(stderr,
- "Target path already exists, overwrite? [y|n] ");
-
- if (getchar() != 'y') {
- return (-1);
- }
+ if (ap_is_existing_file(pathname) != 0) {
+ return (-1);
}
/* Point stdout to the file */
- file = freopen(pathname, "w", stdout);
+ file = acpi_os_open_file(pathname, ACPI_FILE_WRITING);
if (!file) {
- perror("Could not open output file");
+ acpi_log_error("Could not open output file: %s\n", pathname);
return (-1);
}
@@ -106,7 +121,7 @@
{
char filename[ACPI_NAME_SIZE + 16];
char instance_str[16];
- FILE *file;
+ ACPI_FILE file;
size_t actual;
u32 table_length;
@@ -130,35 +145,37 @@
	/* Handle multiple SSDTs - create different filenames for each */
if (instance > 0) {
- sprintf(instance_str, "%u", instance);
- strcat(filename, instance_str);
+ acpi_ut_snprintf(instance_str, sizeof(instance_str), "%u",
+ instance);
+ ACPI_STRCAT(filename, instance_str);
}
- strcat(filename, ACPI_TABLE_FILE_SUFFIX);
+ ACPI_STRCAT(filename, ACPI_TABLE_FILE_SUFFIX);
if (gbl_verbose_mode) {
- fprintf(stderr,
- "Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
- table->signature, filename, table->length,
- table->length);
+ acpi_log_error
+ ("Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
+ table->signature, filename, table->length, table->length);
}
/* Open the file and dump the entire table in binary mode */
- file = fopen(filename, "wb");
+ file = acpi_os_open_file(filename,
+ ACPI_FILE_WRITING | ACPI_FILE_BINARY);
if (!file) {
- perror("Could not open output file");
+ acpi_log_error("Could not open output file: %s\n", filename);
return (-1);
}
- actual = fwrite(table, 1, table_length, file);
+ actual = acpi_os_write_file(file, table, 1, table_length);
if (actual != table_length) {
- perror("Error writing binary output file");
- fclose(file);
+ acpi_log_error("Error writing binary output file: %s\n",
+ filename);
+ acpi_os_close_file(file);
return (-1);
}
- fclose(file);
+ acpi_os_close_file(file);
return (0);
}
@@ -179,15 +196,16 @@
u32 *out_file_size)
{
struct acpi_table_header *buffer = NULL;
- FILE *file;
+ ACPI_FILE file;
u32 file_size;
size_t actual;
/* Must use binary mode */
- file = fopen(pathname, "rb");
+ file =
+ acpi_os_open_file(pathname, ACPI_FILE_READING | ACPI_FILE_BINARY);
if (!file) {
- perror("Could not open input file");
+ acpi_log_error("Could not open input file: %s\n", pathname);
return (NULL);
}
@@ -195,27 +213,25 @@
file_size = cm_get_file_size(file);
if (file_size == ACPI_UINT32_MAX) {
- fprintf(stderr,
- "Could not get input file size: %s\n", pathname);
+ acpi_log_error("Could not get input file size: %s\n", pathname);
goto cleanup;
}
/* Allocate a buffer for the entire file */
- buffer = calloc(1, file_size);
+ buffer = ACPI_ALLOCATE_ZEROED(file_size);
if (!buffer) {
- fprintf(stderr,
- "Could not allocate file buffer of size: %u\n",
- file_size);
+ acpi_log_error("Could not allocate file buffer of size: %u\n",
+ file_size);
goto cleanup;
}
/* Read the entire file */
- actual = fread(buffer, 1, file_size, file);
+ actual = acpi_os_read_file(file, buffer, 1, file_size);
if (actual != file_size) {
- fprintf(stderr, "Could not read input file: %s\n", pathname);
- free(buffer);
+ acpi_log_error("Could not read input file: %s\n", pathname);
+ ACPI_FREE(buffer);
buffer = NULL;
goto cleanup;
}
@@ -223,6 +239,6 @@
*out_file_size = file_size;
cleanup:
- fclose(file);
+ acpi_os_close_file(file);
return (buffer);
}
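
The apfiles.c changes above consistently swap the libc stdio and allocation calls
(fopen/fread/fwrite/fclose, calloc/free) for the portable ACPI_FILE and
ACPI_ALLOCATE/ACPI_FREE wrappers, which is what lets the same sources also build for
the EFI environment. Purely as an illustration of the resulting I/O pattern (the
helper below is hypothetical, not part of the patch, and assumes the acpidump.h and
acapps.h includes used above):

/* Hypothetical helper showing the wrapper-based write path used above */
static int ap_save_buffer(char *pathname, void *buffer, u32 length)
{
	ACPI_FILE file;
	size_t actual;

	file = acpi_os_open_file(pathname,
				 ACPI_FILE_WRITING | ACPI_FILE_BINARY);
	if (!file) {
		acpi_log_error("Could not open output file: %s\n", pathname);
		return (-1);
	}

	/* Dump the entire buffer, then close via the wrapper */
	actual = acpi_os_write_file(file, buffer, 1, length);
	acpi_os_close_file(file);

	if (actual != length) {
		acpi_log_error("Error writing binary output file: %s\n",
			       pathname);
		return (-1);
	}

	return (0);
}
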
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 51e8d63..853b4da 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -72,7 +72,7 @@
static int ap_do_options(int argc, char **argv);
-static void ap_insert_action(char *argument, u32 to_be_done);
+static int ap_insert_action(char *argument, u32 to_be_done);
/* Table for deferred actions from command line options */
@@ -104,7 +104,7 @@
ACPI_OPTION("-v", "Display version information");
ACPI_OPTION("-z", "Verbose mode");
- printf("\nTable Options:\n");
+ ACPI_USAGE_TEXT("\nTable Options:\n");
ACPI_OPTION("-a <Address>", "Get table via a physical address");
ACPI_OPTION("-f <BinaryFile>", "Get table via a binary file");
@@ -112,9 +112,9 @@
ACPI_OPTION("-x", "Do not use but dump XSDT");
ACPI_OPTION("-x -x", "Do not use or dump XSDT");
- printf("\n"
- "Invocation without parameters dumps all available tables\n"
- "Multiple mixed instances of -a, -f, and -n are supported\n\n");
+ ACPI_USAGE_TEXT("\n"
+ "Invocation without parameters dumps all available tables\n"
+ "Multiple mixed instances of -a, -f, and -n are supported\n\n");
}
/******************************************************************************
@@ -124,13 +124,13 @@
* PARAMETERS: argument - Pointer to the argument for this action
* to_be_done - What to do to process this action
*
- * RETURN: None. Exits program if action table becomes full.
+ * RETURN: Status
*
* DESCRIPTION: Add an action item to the action table
*
******************************************************************************/
-static void ap_insert_action(char *argument, u32 to_be_done)
+static int ap_insert_action(char *argument, u32 to_be_done)
{
/* Insert action and check for table overflow */
@@ -140,10 +140,12 @@
current_action++;
if (current_action > AP_MAX_ACTIONS) {
- fprintf(stderr, "Too many table options (max %u)\n",
- AP_MAX_ACTIONS);
- exit(-1);
+ acpi_log_error("Too many table options (max %u)\n",
+ AP_MAX_ACTIONS);
+ return (-1);
}
+
+ return (0);
}
/******************************************************************************
@@ -166,7 +168,8 @@
/* Command line options */
- while ((j = acpi_getopt(argc, argv, AP_SUPPORTED_OPTIONS)) != EOF)
+ while ((j =
+ acpi_getopt(argc, argv, AP_SUPPORTED_OPTIONS)) != ACPI_OPT_END)
switch (j) {
/*
* Global options
@@ -185,12 +188,12 @@
case '?':
ap_display_usage();
- exit(0);
+ return (1);
case 'o': /* Redirect output to a single file */
if (ap_open_output_file(acpi_gbl_optarg)) {
- exit(-1);
+ return (-1);
}
continue;
@@ -200,10 +203,10 @@
acpi_ut_strtoul64(acpi_gbl_optarg, 0,
&gbl_rsdp_base);
if (ACPI_FAILURE(status)) {
- fprintf(stderr,
- "%s: Could not convert to a physical address\n",
- acpi_gbl_optarg);
- exit(-1);
+ acpi_log_error
+ ("%s: Could not convert to a physical address\n",
+ acpi_gbl_optarg);
+ return (-1);
}
continue;
@@ -223,13 +226,13 @@
case 'v': /* Revision/version */
- printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
- exit(0);
+ acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ return (1);
case 'z': /* Verbose mode */
gbl_verbose_mode = TRUE;
- fprintf(stderr, ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ acpi_log_error(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
continue;
/*
@@ -237,32 +240,40 @@
*/
case 'a': /* Get table by physical address */
- ap_insert_action(acpi_gbl_optarg,
- AP_DUMP_TABLE_BY_ADDRESS);
+ if (ap_insert_action
+ (acpi_gbl_optarg, AP_DUMP_TABLE_BY_ADDRESS)) {
+ return (-1);
+ }
break;
case 'f': /* Get table from a file */
- ap_insert_action(acpi_gbl_optarg,
- AP_DUMP_TABLE_BY_FILE);
+ if (ap_insert_action
+ (acpi_gbl_optarg, AP_DUMP_TABLE_BY_FILE)) {
+ return (-1);
+ }
break;
case 'n': /* Get table by input name (signature) */
- ap_insert_action(acpi_gbl_optarg,
- AP_DUMP_TABLE_BY_NAME);
+ if (ap_insert_action
+ (acpi_gbl_optarg, AP_DUMP_TABLE_BY_NAME)) {
+ return (-1);
+ }
break;
default:
ap_display_usage();
- exit(-1);
+ return (-1);
}
/* If there are no actions, this means "get/dump all tables" */
if (current_action == 0) {
- ap_insert_action(NULL, AP_DUMP_ALL_TABLES);
+ if (ap_insert_action(NULL, AP_DUMP_ALL_TABLES)) {
+ return (-1);
+ }
}
return (0);
@@ -280,7 +291,11 @@
*
******************************************************************************/
+#ifndef _GNU_EFI
int ACPI_SYSTEM_XFACE main(int argc, char *argv[])
+#else
+int ACPI_SYSTEM_XFACE acpi_main(int argc, char *argv[])
+#endif
{
int status = 0;
struct ap_dump_action *action;
@@ -288,11 +303,17 @@
u32 i;
ACPI_DEBUG_INITIALIZE(); /* For debug version only */
+ acpi_os_initialize();
+ gbl_output_file = ACPI_FILE_OUT;
/* Process command line options */
- if (ap_do_options(argc, argv)) {
- return (-1);
+ status = ap_do_options(argc, argv);
+ if (status > 0) {
+ return (0);
+ }
+ if (status < 0) {
+ return (status);
}
/* Get/dump ACPI table(s) as requested */
@@ -322,9 +343,8 @@
default:
- fprintf(stderr,
- "Internal error, invalid action: 0x%X\n",
- action->to_be_done);
+ acpi_log_error("Internal error, invalid action: 0x%X\n",
+ action->to_be_done);
return (-1);
}
@@ -333,18 +353,18 @@
}
}
- if (gbl_output_file) {
+ if (gbl_output_filename) {
if (gbl_verbose_mode) {
/* Summary for the output file */
file_size = cm_get_file_size(gbl_output_file);
- fprintf(stderr,
- "Output file %s contains 0x%X (%u) bytes\n\n",
- gbl_output_filename, file_size, file_size);
+ acpi_log_error
+ ("Output file %s contains 0x%X (%u) bytes\n\n",
+ gbl_output_filename, file_size, file_size);
}
- fclose(gbl_output_file);
+ acpi_os_close_file(gbl_output_file);
}
return (status);