Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull additional x86 fixes from Peter Anvin.
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, amd, xen: Avoid NULL pointer paravirt references
x86, mtrr: Fix a type overflow in range_to_mtrr func
x86, realmode: Unbreak the ia64 build of drivers/acpi/sleep.c
x86/mm/pat: Improve scaling of pat_pagerange_is_ram()
x86: hpet: Fix copy-and-paste mistake in earlier change
x86/mce: Fix 32-bit build
x86/bitops: Move BIT_64() for a wider use
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index dbedafb..bcd88eb 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -65,11 +65,11 @@
Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
-------------------------------------------------------------
-id
+snap_id
The rados internal snapshot id assigned for this snapshot
-size
+snap_size
The size of the image when this snapshot was taken.
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
new file mode 100644
index 0000000..099d936
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
@@ -0,0 +1,14 @@
+NVIDIA Tegra 20 GART
+
+Required properties:
+- compatible: "nvidia,tegra20-gart"
+- reg: Two pairs of cells specifying the physical address and size of
+ the memory controller registers and the GART aperture respectively.
+
+Example:
+
+ gart {
+ compatible = "nvidia,tegra20-gart";
+ reg = <0x7000f024 0x00000018 /* controller registers */
+ 0x58000000 0x02000000>; /* GART aperture */
+ };
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ebaffe2..56000b3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -606,3 +606,9 @@
Who: Ludovic Desroches <ludovic.desroches@atmel.com>
----------------------------
+
+What: net/wanrouter/
+When: June 2013
+Why: Unsupported/unmaintained/unused since 2.6
+
+----------------------------
diff --git a/Documentation/i2c/functionality b/Documentation/i2c/functionality
index 42c17c1..b0ff2ab 100644
--- a/Documentation/i2c/functionality
+++ b/Documentation/i2c/functionality
@@ -18,9 +18,9 @@
adapters typically can not do these)
I2C_FUNC_10BIT_ADDR Handles the 10-bit address extensions
I2C_FUNC_PROTOCOL_MANGLING Knows about the I2C_M_IGNORE_NAK,
- I2C_M_REV_DIR_ADDR, I2C_M_NOSTART and
- I2C_M_NO_RD_ACK flags (which modify the
- I2C protocol!)
+ I2C_M_REV_DIR_ADDR and I2C_M_NO_RD_ACK
+ flags (which modify the I2C protocol!)
+ I2C_FUNC_NOSTART Can skip repeated start sequence
I2C_FUNC_SMBUS_QUICK Handles the SMBus write_quick command
I2C_FUNC_SMBUS_READ_BYTE Handles the SMBus read_byte command
I2C_FUNC_SMBUS_WRITE_BYTE Handles the SMBus write_byte command
@@ -50,6 +50,9 @@
emulated by a real I2C adapter (using
the transparent emulation layer)
+In kernel versions prior to 3.5 I2C_FUNC_NOSTART was implemented as
+part of I2C_FUNC_PROTOCOL_MANGLING.
+
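
As an illustration of the split described above, a client driver now tests for the
repeated-start capability with i2c_check_functionality() against I2C_FUNC_NOSTART
instead of I2C_FUNC_PROTOCOL_MANGLING. A minimal sketch, not part of this patch
(the function name is invented):

#include <linux/i2c.h>

/*
 * Hypothetical probe-time check: succeeds only if the adapter can
 * suppress the repeated start, which is now reported through
 * I2C_FUNC_NOSTART rather than I2C_FUNC_PROTOCOL_MANGLING.
 */
static int my_check_nostart(struct i2c_client *client)
{
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_NOSTART))
                return -EOPNOTSUPP;
        return 0;
}

Drivers that previously tested I2C_FUNC_PROTOCOL_MANGLING for this purpose need to
switch to the new flag when targeting 3.5 and later.
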
ADAPTER IMPLEMENTATION
----------------------
diff --git a/Documentation/i2c/i2c-protocol b/Documentation/i2c/i2c-protocol
index 10518dd..0b3e62d 100644
--- a/Documentation/i2c/i2c-protocol
+++ b/Documentation/i2c/i2c-protocol
@@ -49,7 +49,9 @@
Modified transactions
=====================
-We have found some I2C devices that needs the following modifications:
+The following modifications to the I2C protocol can also be generated;
+with the exception of I2C_M_NOSTART, these are usually only needed to
+work around device issues:
Flag I2C_M_NOSTART:
In a combined transaction, no 'S Addr Wr/Rd [A]' is generated at some
@@ -60,6 +62,11 @@
we do not generate Addr, but we do generate the startbit S. This will
probably confuse all other clients on your bus, so don't try this.
+ This is often used to gather transmits from multiple data buffers in
+ system memory into something that appears as a single transfer to the
+ I2C device but may also be used between direction changes by some
+ rare devices.
+
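
To make the gather case concrete, here is a sketch (not part of this change; the
helper and buffer names are made up) of two buffers sent as one continuous write by
flagging the second message with I2C_M_NOSTART:

#include <linux/i2c.h>

/*
 * Hypothetical gather write: header[] and payload[] reach the chip as a
 * single transfer because the second message carries I2C_M_NOSTART, so
 * no repeated 'S Addr Wr [A]' is emitted between the two buffers.
 * The adapter must advertise I2C_FUNC_NOSTART for this to work.
 */
static int my_gather_write(struct i2c_client *client,
                           u8 *header, u16 hlen, u8 *payload, u16 plen)
{
        struct i2c_msg msgs[2] = {
                { .addr = client->addr, .flags = 0,
                  .len = hlen, .buf = header },
                { .addr = client->addr, .flags = I2C_M_NOSTART,
                  .len = plen, .buf = payload },
        };
        int ret = i2c_transfer(client->adapter, msgs, 2);

        return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}

If the adapter does not report I2C_FUNC_NOSTART, the driver has to fall back to
copying both buffers into one contiguous message.
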
Flags I2C_M_REV_DIR_ADDR
This toggles the Rd/Wr flag. That is, if you want to do a write, but
need to emit an Rd instead of a Wr, or vice versa, you set this
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b40b413..c45513d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -335,6 +335,12 @@
requirements as needed. This option
does not override iommu=pt
+ amd_iommu_dump= [HW,X86-64]
+ Enable AMD IOMMU driver option to dump the ACPI table
+ for AMD IOMMU. With this option enabled, AMD IOMMU
+ driver will print ACPI tables for AMD IOMMU during
+ IOMMU initialization.
+
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt
index 25fe430..086638f 100644
--- a/Documentation/watchdog/watchdog-kernel-api.txt
+++ b/Documentation/watchdog/watchdog-kernel-api.txt
@@ -1,6 +1,6 @@
The Linux WatchDog Timer Driver Core kernel API.
===============================================
-Last reviewed: 16-Mar-2012
+Last reviewed: 22-May-2012
Wim Van Sebroeck <wim@iguana.be>
@@ -39,6 +39,10 @@
The watchdog device structure looks like this:
struct watchdog_device {
+ int id;
+ struct cdev cdev;
+ struct device *dev;
+ struct device *parent;
const struct watchdog_info *info;
const struct watchdog_ops *ops;
unsigned int bootstatus;
@@ -46,10 +50,20 @@
unsigned int min_timeout;
unsigned int max_timeout;
void *driver_data;
+ struct mutex lock;
unsigned long status;
};
It contains following fields:
+* id: set automatically by watchdog_register_device; id 0 is special. It has
+ both a /dev/watchdog0 cdev (dynamic major, minor 0) and the old
+ /dev/watchdog miscdev.
+* cdev: cdev for the dynamic /dev/watchdog<id> device nodes. This
+ field is also populated by watchdog_register_device.
+* dev: device under the watchdog class (created by watchdog_register_device).
+* parent: set this to the parent device (or NULL) before calling
+ watchdog_register_device.
* info: a pointer to a watchdog_info structure. This structure gives some
additional information about the watchdog timer itself. (Like it's unique name)
* ops: a pointer to the list of watchdog operations that the watchdog supports.
@@ -61,6 +75,7 @@
* driver_data: a pointer to the drivers private data of a watchdog device.
This data should only be accessed via the watchdog_set_drvdata and
watchdog_get_drvdata routines.
+* lock: Mutex for WatchDog Timer Driver Core internal use only.
* status: this field contains a number of status bits that give extra
information about the status of the device (Like: is the watchdog timer
running/active, is the nowayout bit set, is the device opened via
@@ -78,6 +93,8 @@
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
+ void (*ref)(struct watchdog_device *);
+ void (*unref)(struct watchdog_device *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
};
@@ -85,6 +102,21 @@
driver's operations. This module owner will be used to lock the module when
the watchdog is active. (This to avoid a system crash when you unload the
module and /dev/watchdog is still open).
+
+If the watchdog_device struct is dynamically allocated, just locking the module
+is not enough and a driver also needs to define the ref and unref operations to
+ensure the structure holding the watchdog_device does not go away.
+
+The simplest (and usually sufficient) implementation of this is to (see the
+sketch after this list):
+1) Add a kref struct to the same structure which is holding the watchdog_device
+2) Define a release callback for the kref which frees the struct holding both
+3) Call kref_init on this kref *before* calling watchdog_register_device()
+4) Define a ref operation calling kref_get on this kref
+5) Define an unref operation calling kref_put on this kref
+6) When it is time to clean up:
+ * Do not kfree() the struct holding both; the last kref_put will do this!
+ * *After* calling watchdog_unregister_device(), call kref_put on the kref
+
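
A compressed sketch of steps 1-6 above (illustrative only; the my_wdt names are
invented and the start/stop handlers are stubs):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

struct my_wdt {                                 /* step 1: kref lives next to wdd */
        struct watchdog_device wdd;
        struct kref kref;
};

static void my_wdt_release(struct kref *kref)   /* step 2: frees both at once */
{
        kfree(container_of(kref, struct my_wdt, kref));
}

static void my_wdt_ref(struct watchdog_device *wdd)    /* step 4 */
{
        kref_get(&container_of(wdd, struct my_wdt, wdd)->kref);
}

static void my_wdt_unref(struct watchdog_device *wdd)  /* step 5 */
{
        kref_put(&container_of(wdd, struct my_wdt, wdd)->kref, my_wdt_release);
}

static int my_wdt_start(struct watchdog_device *wdd) { return 0; }     /* stub */
static int my_wdt_stop(struct watchdog_device *wdd)  { return 0; }     /* stub */

static const struct watchdog_info my_wdt_info = {
        .identity = "my_wdt (sketch)",
};

static const struct watchdog_ops my_wdt_ops = {
        .owner = THIS_MODULE,
        .start = my_wdt_start,
        .stop  = my_wdt_stop,
        .ref   = my_wdt_ref,
        .unref = my_wdt_unref,
};

static struct my_wdt *my_wdt_create(struct device *parent)
{
        struct my_wdt *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        p->wdd.info   = &my_wdt_info;
        p->wdd.ops    = &my_wdt_ops;
        p->wdd.parent = parent;
        kref_init(&p->kref);                    /* step 3: before registration */
        if (watchdog_register_device(&p->wdd)) {
                kref_put(&p->kref, my_wdt_release);
                return NULL;
        }
        return p;
}

static void my_wdt_destroy(struct my_wdt *p)    /* step 6 */
{
        watchdog_unregister_device(&p->wdd);
        kref_put(&p->kref, my_wdt_release);     /* last put frees; no kfree() here */
}

Note that the remove path never frees the struct directly; the release callback runs
on the final kref_put, whether that comes from the driver or from the core's unref
once userspace closes /dev/watchdog.
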
Some operations are mandatory and some are optional. The mandatory operations
are:
* start: this is a pointer to the routine that starts the watchdog timer
@@ -125,6 +157,10 @@
(Note: the WDIOF_SETTIMEOUT needs to be set in the options field of the
watchdog's info structure).
* get_timeleft: this routines returns the time that's left before a reset.
+* ref: the operation that calls kref_get on the kref of a dynamically
+ allocated watchdog_device struct.
+* unref: the operation that calls kref_put on the kref of a dynamically
+ allocated watchdog_device struct.
* ioctl: if this routine is present then it will be called first before we do
our own internal ioctl call handling. This routine should return -ENOIOCTLCMD
if a command is not supported. The parameters that are passed to the ioctl
@@ -144,6 +180,11 @@
(This bit should only be used by the WatchDog Timer Driver Core).
* WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
If this bit is set then the watchdog timer will not be able to stop.
+* WDOG_UNREGISTERED: this bit gets set by the WatchDog Timer Driver Core
+ after calling watchdog_unregister_device, and then checked before calling
+ any watchdog_ops, so that you can be sure that no operations (other than
+ unref) will get called after unregister, even if userspace still holds a
+ reference to /dev/watchdog
To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
timer device) you can either:
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 17ddd82..04fddbac 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -78,6 +78,11 @@
wd1_timeout: Default watchdog1 timeout in 1/10secs
wd2_timeout: Default watchdog2 timeout in 1/10secs
-------------------------------------------------
+da9052wdt:
+timeout: Watchdog timeout in seconds. 2<= timeout <=131, default=2.048s
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
davinci_wdt:
heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
-------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index a246490..8cad55b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2818,6 +2818,12 @@
F: drivers/base/firmware*.c
F: include/linux/firmware.h
+FLOPPY DRIVER
+M: Jiri Kosina <jkosina@suse.cz>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
+S: Odd fixes
+F: drivers/block/floppy.c
+
FPU EMULATOR
M: Bill Metzenthen <billm@melbpc.org.au>
W: http://floatingpoint.sourceforge.net/emulator/index.html
@@ -6651,7 +6657,7 @@
F: kernel/taskstats.c
TC CLASSIFIER
-M: Jamal Hadi Salim <hadi@cyberus.ca>
+M: Jamal Hadi Salim <jhs@mojatatu.com>
L: netdev@vger.kernel.org
S: Maintained
F: include/linux/pkt_cls.h
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5e76013..b649c59 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -525,7 +525,7 @@
select ARCH_HAS_DMA_SET_COHERENT_MASK
select CLKSRC_MMIO
select CPU_XSCALE
- select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select NEED_MACH_IO_H
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 5ca0cdb..4272b29 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -30,6 +30,22 @@
reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
};
+ combiner:interrupt-controller@10440000 {
+ compatible = "samsung,exynos4210-combiner";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ samsung,combiner-nr = <32>;
+ reg = <0x10440000 0x1000>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+ <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+ <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
+ <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+ <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+ };
+
watchdog {
compatible = "samsung,s3c2410-wdt";
reg = <0x101D0000 0x100>;
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 2d69686..3f5dad8 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -215,45 +215,8 @@
gpio: gpio@40028000 {
compatible = "nxp,lpc3220-gpio";
reg = <0x40028000 0x1000>;
- /* create a private address space for enumeration */
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio_p0: gpio-bank@0 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0>;
- };
-
- gpio_p1: gpio-bank@1 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <1>;
- };
-
- gpio_p2: gpio-bank@2 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <2>;
- };
-
- gpio_p3: gpio-bank@3 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <3>;
- };
-
- gpi_p3: gpio-bank@4 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <4>;
- };
-
- gpo_p3: gpio-bank@5 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <5>;
- };
+ gpio-controller;
+ #gpio-cells = <3>; /* bank, pin, flags */
};
watchdog@4003C000 {
diff --git a/arch/arm/boot/dts/phy3250.dts b/arch/arm/boot/dts/phy3250.dts
index 0167e86..c4ff6d1 100644
--- a/arch/arm/boot/dts/phy3250.dts
+++ b/arch/arm/boot/dts/phy3250.dts
@@ -131,13 +131,13 @@
compatible = "gpio-leds";
led0 {
- gpios = <&gpo_p3 1 1>; /* GPO_P3 1, GPIO 80, active low */
+ gpios = <&gpio 5 1 1>; /* GPO_P3 1, GPIO 80, active low */
linux,default-trigger = "heartbeat";
default-state = "off";
};
led1 {
- gpios = <&gpo_p3 14 1>; /* GPO_P3 14, GPIO 93, active low */
+ gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
linux,default-trigger = "timer";
default-state = "off";
};
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 941b161..7e1091d 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -73,7 +73,10 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x2c001000 0x1000>,
- <0x2c002000 0x100>;
+ <0x2c002000 0x1000>,
+ <0x2c004000 0x2000>,
+ <0x2c006000 0x2000>;
+ interrupts = <1 9 0xf04>;
};
memory-controller@7ffd0000 {
@@ -93,6 +96,14 @@
<0 91 4>;
};
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
pmu {
compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
interrupts = <0 68 4>,
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 6905e66d..18917a0 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -77,13 +77,18 @@
timer@2c000600 {
compatible = "arm,cortex-a5-twd-timer";
- reg = <0x2c000600 0x38>;
- interrupts = <1 2 0x304>,
- <1 3 0x304>;
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0x304>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a5-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0x304>;
};
gic: interrupt-controller@2c001000 {
- compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
+ compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index da77869..3f0c736 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -105,8 +105,13 @@
timer@1e000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e000600 0x20>;
- interrupts = <1 2 0xf04>,
- <1 3 0xf04>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@1e000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x1e000620 0x20>;
+ interrupts = <1 14 0xf04>;
};
gic: interrupt-controller@1e001000 {
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 43ebe90..573be57 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -62,6 +62,8 @@
default y
depends on ARCH_EXYNOS5
select SAMSUNG_DMADEV
+ select S5P_PM if PM
+ select S5P_SLEEP if PM
help
Enable EXYNOS5250 SoC support
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 440a637..9b58024 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -22,7 +22,7 @@
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
-obj-$(CONFIG_ARCH_EXYNOS4) += pmu.o
+obj-$(CONFIG_ARCH_EXYNOS) += pmu.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 5aa460b..fefa336 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -30,7 +30,56 @@
#ifdef CONFIG_PM_SLEEP
static struct sleep_save exynos5_clock_save[] = {
- /* will be implemented */
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_TOP),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_DISP1_0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_FSYS),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_MAUDIO),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC1),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_DISP1),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_MFC),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_G3D),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_GEN),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_FSYS),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIC),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIS),
+ SAVE_ITEM(EXYNOS5_CLKGATE_BLOCK),
+ SAVE_ITEM(EXYNOS5_CLKDIV_TOP0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_TOP1),
+ SAVE_ITEM(EXYNOS5_CLKDIV_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKDIV_DISP1_0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_GEN),
+ SAVE_ITEM(EXYNOS5_CLKDIV_MAUDIO),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS1),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS2),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS3),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC1),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC2),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC3),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC4),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC5),
+ SAVE_ITEM(EXYNOS5_SCLK_DIV_ISP),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP1),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP2),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP3),
+ SAVE_ITEM(EXYNOS5_CLKSRC_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKSRC_DISP1_0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MAUDIO),
+ SAVE_ITEM(EXYNOS5_CLKSRC_FSYS),
+ SAVE_ITEM(EXYNOS5_CLKSRC_PERIC0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_PERIC1),
+ SAVE_ITEM(EXYNOS5_SCLK_SRC_ISP),
+ SAVE_ITEM(EXYNOS5_EPLL_CON0),
+ SAVE_ITEM(EXYNOS5_EPLL_CON1),
+ SAVE_ITEM(EXYNOS5_EPLL_CON2),
+ SAVE_ITEM(EXYNOS5_VPLL_CON0),
+ SAVE_ITEM(EXYNOS5_VPLL_CON1),
+ SAVE_ITEM(EXYNOS5_VPLL_CON2),
};
#endif
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 26dac28..cff0595 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -100,7 +100,7 @@
exynos4_set_wakeupmask();
/* Set value of power down register for aftr mode */
- exynos4_sys_powerdown_conf(SYS_AFTR);
+ exynos_sys_powerdown_conf(SYS_AFTR);
__raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
__raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);
diff --git a/arch/arm/mach-exynos/include/mach/pm-core.h b/arch/arm/mach-exynos/include/mach/pm-core.h
index 9d8da51e3..a67ecfa 100644
--- a/arch/arm/mach-exynos/include/mach/pm-core.h
+++ b/arch/arm/mach-exynos/include/mach/pm-core.h
@@ -33,7 +33,7 @@
__raw_writel(tmp, S5P_WAKEUP_MASK);
__raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
- __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+ __raw_writel(s3c_irqwake_eintmask & 0xFFFFFFFE, S5P_EINT_WAKEUP_MASK);
}
static inline void s3c_pm_arch_stop_clocks(void)
diff --git a/arch/arm/mach-exynos/include/mach/pmu.h b/arch/arm/mach-exynos/include/mach/pmu.h
index e76b7fa..7c27c2d 100644
--- a/arch/arm/mach-exynos/include/mach/pmu.h
+++ b/arch/arm/mach-exynos/include/mach/pmu.h
@@ -23,12 +23,12 @@
};
extern unsigned long l2x0_regs_phys;
-struct exynos4_pmu_conf {
+struct exynos_pmu_conf {
void __iomem *reg;
unsigned int val[NUM_SYS_POWERDOWN];
};
-extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode);
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
extern void s3c_cpu_resume(void);
#endif /* __ASM_ARCH_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index b78b5f3..8c9b38c 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -274,36 +274,51 @@
#define EXYNOS5_CLKDIV_ACP EXYNOS_CLKREG(0x08500)
-#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218)
#define EXYNOS5_EPLL_CON0 EXYNOS_CLKREG(0x10130)
#define EXYNOS5_EPLL_CON1 EXYNOS_CLKREG(0x10134)
+#define EXYNOS5_EPLL_CON2 EXYNOS_CLKREG(0x10138)
#define EXYNOS5_VPLL_CON0 EXYNOS_CLKREG(0x10140)
#define EXYNOS5_VPLL_CON1 EXYNOS_CLKREG(0x10144)
+#define EXYNOS5_VPLL_CON2 EXYNOS_CLKREG(0x10148)
#define EXYNOS5_CPLL_CON0 EXYNOS_CLKREG(0x10120)
#define EXYNOS5_CLKSRC_TOP0 EXYNOS_CLKREG(0x10210)
+#define EXYNOS5_CLKSRC_TOP1 EXYNOS_CLKREG(0x10214)
+#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218)
#define EXYNOS5_CLKSRC_TOP3 EXYNOS_CLKREG(0x1021C)
#define EXYNOS5_CLKSRC_GSCL EXYNOS_CLKREG(0x10220)
#define EXYNOS5_CLKSRC_DISP1_0 EXYNOS_CLKREG(0x1022C)
+#define EXYNOS5_CLKSRC_MAUDIO EXYNOS_CLKREG(0x10240)
#define EXYNOS5_CLKSRC_FSYS EXYNOS_CLKREG(0x10244)
#define EXYNOS5_CLKSRC_PERIC0 EXYNOS_CLKREG(0x10250)
+#define EXYNOS5_CLKSRC_PERIC1 EXYNOS_CLKREG(0x10254)
+#define EXYNOS5_SCLK_SRC_ISP EXYNOS_CLKREG(0x10270)
#define EXYNOS5_CLKSRC_MASK_TOP EXYNOS_CLKREG(0x10310)
#define EXYNOS5_CLKSRC_MASK_GSCL EXYNOS_CLKREG(0x10320)
#define EXYNOS5_CLKSRC_MASK_DISP1_0 EXYNOS_CLKREG(0x1032C)
+#define EXYNOS5_CLKSRC_MASK_MAUDIO EXYNOS_CLKREG(0x10334)
#define EXYNOS5_CLKSRC_MASK_FSYS EXYNOS_CLKREG(0x10340)
#define EXYNOS5_CLKSRC_MASK_PERIC0 EXYNOS_CLKREG(0x10350)
+#define EXYNOS5_CLKSRC_MASK_PERIC1 EXYNOS_CLKREG(0x10354)
#define EXYNOS5_CLKDIV_TOP0 EXYNOS_CLKREG(0x10510)
#define EXYNOS5_CLKDIV_TOP1 EXYNOS_CLKREG(0x10514)
#define EXYNOS5_CLKDIV_GSCL EXYNOS_CLKREG(0x10520)
#define EXYNOS5_CLKDIV_DISP1_0 EXYNOS_CLKREG(0x1052C)
#define EXYNOS5_CLKDIV_GEN EXYNOS_CLKREG(0x1053C)
+#define EXYNOS5_CLKDIV_MAUDIO EXYNOS_CLKREG(0x10544)
#define EXYNOS5_CLKDIV_FSYS0 EXYNOS_CLKREG(0x10548)
#define EXYNOS5_CLKDIV_FSYS1 EXYNOS_CLKREG(0x1054C)
#define EXYNOS5_CLKDIV_FSYS2 EXYNOS_CLKREG(0x10550)
#define EXYNOS5_CLKDIV_FSYS3 EXYNOS_CLKREG(0x10554)
#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558)
+#define EXYNOS5_CLKDIV_PERIC1 EXYNOS_CLKREG(0x1055C)
+#define EXYNOS5_CLKDIV_PERIC2 EXYNOS_CLKREG(0x10560)
+#define EXYNOS5_CLKDIV_PERIC3 EXYNOS_CLKREG(0x10564)
+#define EXYNOS5_CLKDIV_PERIC4 EXYNOS_CLKREG(0x10568)
+#define EXYNOS5_CLKDIV_PERIC5 EXYNOS_CLKREG(0x1056C)
+#define EXYNOS5_SCLK_DIV_ISP EXYNOS_CLKREG(0x10580)
#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800)
#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800)
@@ -311,6 +326,7 @@
#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920)
#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928)
#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C)
+#define EXYNOS5_CLKGATE_IP_G3D EXYNOS_CLKREG(0x10930)
#define EXYNOS5_CLKGATE_IP_GEN EXYNOS_CLKREG(0x10934)
#define EXYNOS5_CLKGATE_IP_FSYS EXYNOS_CLKREG(0x10944)
#define EXYNOS5_CLKGATE_IP_GPS EXYNOS_CLKREG(0x1094C)
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index 4dbb862..43a99e6 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - Power management unit definition
+ * EXYNOS - Power management unit definition
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -229,4 +228,138 @@
#define S5P_DIS_IRQ_CORE3 S5P_PMUREG(0x1034)
#define S5P_DIS_IRQ_CENTRAL3 S5P_PMUREG(0x1038)
+/* For EXYNOS5 */
+
+#define EXYNOS5_USB_CFG S5P_PMUREG(0x0230)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG S5P_PMUREG(0x1000)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1004)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1008)
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG S5P_PMUREG(0x1010)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1014)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1018)
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG S5P_PMUREG(0x1040)
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1048)
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG S5P_PMUREG(0x1050)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1054)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1058)
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG S5P_PMUREG(0x1080)
+#define EXYNOS5_ARM_L2_SYS_PWR_REG S5P_PMUREG(0x10C0)
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG S5P_PMUREG(0x1100)
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG S5P_PMUREG(0x1104)
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG S5P_PMUREG(0x110C)
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1120)
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1124)
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x112C)
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG S5P_PMUREG(0x1130)
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG S5P_PMUREG(0x1134)
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG S5P_PMUREG(0x1138)
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1140)
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1144)
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1148)
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x114C)
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1150)
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1154)
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1164)
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1170)
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG S5P_PMUREG(0x1180)
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG S5P_PMUREG(0x1184)
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG S5P_PMUREG(0x1188)
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1190)
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1194)
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1198)
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG S5P_PMUREG(0x11A0)
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG S5P_PMUREG(0x11A4)
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x11B0)
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x11B4)
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG S5P_PMUREG(0x11C0)
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG S5P_PMUREG(0x11C8)
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG S5P_PMUREG(0x11CC)
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG S5P_PMUREG(0x11D0)
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG S5P_PMUREG(0x11D4)
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG S5P_PMUREG(0x11D8)
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG S5P_PMUREG(0x11DC)
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG S5P_PMUREG(0x11E0)
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG S5P_PMUREG(0x11E4)
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG S5P_PMUREG(0x11E8)
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG S5P_PMUREG(0x11EC)
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG S5P_PMUREG(0x11F4)
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG S5P_PMUREG(0x11FC)
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG S5P_PMUREG(0x1200)
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG S5P_PMUREG(0x1204)
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG S5P_PMUREG(0x1208)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG S5P_PMUREG(0x1220)
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG S5P_PMUREG(0x1224)
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG S5P_PMUREG(0x1228)
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG S5P_PMUREG(0x122C)
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG S5P_PMUREG(0x1230)
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG S5P_PMUREG(0x1234)
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG S5P_PMUREG(0x1238)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x123C)
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG S5P_PMUREG(0x1240)
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1250)
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG S5P_PMUREG(0x1260)
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG S5P_PMUREG(0x1280)
+#define EXYNOS5_XXTI_SYS_PWR_REG S5P_PMUREG(0x1284)
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG S5P_PMUREG(0x12C0)
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG S5P_PMUREG(0x1300)
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1320)
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG S5P_PMUREG(0x1340)
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG S5P_PMUREG(0x1344)
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG S5P_PMUREG(0x1348)
+#define EXYNOS5_GSCL_SYS_PWR_REG S5P_PMUREG(0x1400)
+#define EXYNOS5_ISP_SYS_PWR_REG S5P_PMUREG(0x1404)
+#define EXYNOS5_MFC_SYS_PWR_REG S5P_PMUREG(0x1408)
+#define EXYNOS5_G3D_SYS_PWR_REG S5P_PMUREG(0x140C)
+#define EXYNOS5_DISP1_SYS_PWR_REG S5P_PMUREG(0x1414)
+#define EXYNOS5_MAU_SYS_PWR_REG S5P_PMUREG(0x1418)
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG S5P_PMUREG(0x1480)
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG S5P_PMUREG(0x1484)
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG S5P_PMUREG(0x1488)
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG S5P_PMUREG(0x148C)
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG S5P_PMUREG(0x1494)
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG S5P_PMUREG(0x1498)
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG S5P_PMUREG(0x14C0)
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG S5P_PMUREG(0x14C4)
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG S5P_PMUREG(0x14C8)
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG S5P_PMUREG(0x14CC)
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG S5P_PMUREG(0x14D4)
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG S5P_PMUREG(0x14D8)
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG S5P_PMUREG(0x1580)
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG S5P_PMUREG(0x1584)
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG S5P_PMUREG(0x1588)
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG S5P_PMUREG(0x158C)
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG S5P_PMUREG(0x1594)
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG S5P_PMUREG(0x1598)
+
+#define EXYNOS5_ARM_CORE0_OPTION S5P_PMUREG(0x2008)
+#define EXYNOS5_ARM_CORE1_OPTION S5P_PMUREG(0x2088)
+#define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208)
+#define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288)
+#define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408)
+#define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48)
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8)
+#define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48)
+#define EXYNOS5_GSCL_STATUS S5P_PMUREG(0x4004)
+#define EXYNOS5_ISP_STATUS S5P_PMUREG(0x4024)
+#define EXYNOS5_GSCL_OPTION S5P_PMUREG(0x4008)
+#define EXYNOS5_ISP_OPTION S5P_PMUREG(0x4028)
+#define EXYNOS5_MFC_OPTION S5P_PMUREG(0x4048)
+#define EXYNOS5_G3D_CONFIGURATION S5P_PMUREG(0x4060)
+#define EXYNOS5_G3D_STATUS S5P_PMUREG(0x4064)
+#define EXYNOS5_G3D_OPTION S5P_PMUREG(0x4068)
+#define EXYNOS5_DISP1_OPTION S5P_PMUREG(0x40A8)
+#define EXYNOS5_MAU_OPTION S5P_PMUREG(0x40C8)
+
+#define EXYNOS5_USE_SC_FEEDBACK (1 << 1)
+#define EXYNOS5_USE_SC_COUNTER (1 << 0)
+
+#define EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL (1 << 2)
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
+
#endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 563dea9..c06c992 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pm.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4210 - Power Management support
+ * EXYNOS - Power Management support
*
* Based on arch/arm/mach-s3c2410/pm.c
* Copyright (c) 2006 Simtec Electronics
@@ -63,90 +62,7 @@
SAVE_ITEM(EXYNOS4_VPLL_CON1),
};
-static struct sleep_save exynos4_core_save[] = {
- /* GIC side */
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x000),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x004),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x008),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x014),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x018),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x000),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x004),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x100),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x104),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x108),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x300),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x304),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x308),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x400),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x404),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x408),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x410),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x414),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x418),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x420),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x424),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x428),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x430),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x434),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x438),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x440),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x444),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x448),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x450),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x454),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x458),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C),
-
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x800),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x804),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x808),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x810),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x814),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x818),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x820),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x824),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x828),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x830),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x834),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x838),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x840),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x844),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x848),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x850),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x854),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x858),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C),
-
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14),
-
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x030),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x040),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x050),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x060),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x070),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x080),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x090),
-
+static struct sleep_save exynos_core_save[] = {
/* SROM side */
SAVE_ITEM(S5P_SROM_BW),
SAVE_ITEM(S5P_SROM_BC0),
@@ -159,9 +75,11 @@
/* For Cortex-A9 Diagnostic and Power control register */
static unsigned int save_arm_register[2];
-static int exynos4_cpu_suspend(unsigned long arg)
+static int exynos_cpu_suspend(unsigned long arg)
{
+#ifdef CONFIG_CACHE_L2X0
outer_flush_all();
+#endif
/* issue the standby signal into the pm unit. */
cpu_do_idle();
@@ -170,19 +88,25 @@
panic("sleep resumed to originator?");
}
-static void exynos4_pm_prepare(void)
+static void exynos_pm_prepare(void)
{
- u32 tmp;
+ unsigned int tmp;
- s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
- s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
- s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+ s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- tmp = __raw_readl(S5P_INFORM1);
+ if (!soc_is_exynos5250()) {
+ s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
+ s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+ } else {
+ /* Disable USE_RETENTION of JPEG_MEM_OPTION */
+ tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
+ tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
+ __raw_writel(tmp, EXYNOS5_JPEG_MEM_OPTION);
+ }
/* Set value of power down register for sleep mode */
- exynos4_sys_powerdown_conf(SYS_SLEEP);
+ exynos_sys_powerdown_conf(SYS_SLEEP);
__raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
/* ensure at least INFORM0 has the resume address */
@@ -191,17 +115,18 @@
/* Before enter central sequence mode, clock src register have to set */
- s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
+ if (!soc_is_exynos5250())
+ s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
if (soc_is_exynos4210())
s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc));
}
-static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif)
+static int exynos_pm_add(struct device *dev, struct subsys_interface *sif)
{
- pm_cpu_prep = exynos4_pm_prepare;
- pm_cpu_sleep = exynos4_cpu_suspend;
+ pm_cpu_prep = exynos_pm_prepare;
+ pm_cpu_sleep = exynos_cpu_suspend;
return 0;
}
@@ -273,13 +198,13 @@
} while (epll_wait || vpll_wait);
}
-static struct subsys_interface exynos4_pm_interface = {
- .name = "exynos4_pm",
+static struct subsys_interface exynos_pm_interface = {
+ .name = "exynos_pm",
.subsys = &exynos_subsys,
- .add_dev = exynos4_pm_add,
+ .add_dev = exynos_pm_add,
};
-static __init int exynos4_pm_drvinit(void)
+static __init int exynos_pm_drvinit(void)
{
struct clk *pll_base;
unsigned int tmp;
@@ -292,18 +217,20 @@
tmp |= ((0xFF << 8) | (0x1F << 1));
__raw_writel(tmp, S5P_WAKEUP_MASK);
- pll_base = clk_get(NULL, "xtal");
+ if (!soc_is_exynos5250()) {
+ pll_base = clk_get(NULL, "xtal");
- if (!IS_ERR(pll_base)) {
- pll_base_rate = clk_get_rate(pll_base);
- clk_put(pll_base);
+ if (!IS_ERR(pll_base)) {
+ pll_base_rate = clk_get_rate(pll_base);
+ clk_put(pll_base);
+ }
}
- return subsys_interface_register(&exynos4_pm_interface);
+ return subsys_interface_register(&exynos_pm_interface);
}
-arch_initcall(exynos4_pm_drvinit);
+arch_initcall(exynos_pm_drvinit);
-static int exynos4_pm_suspend(void)
+static int exynos_pm_suspend(void)
{
unsigned long tmp;
@@ -313,27 +240,27 @@
tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
__raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
- if (soc_is_exynos4212() || soc_is_exynos4412()) {
- tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
- tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
- S5P_USE_STANDBYWFE_ISP_ARM);
- __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
+ /* Setting SEQ_OPTION register */
+
+ tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
+ __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
+
+ if (!soc_is_exynos5250()) {
+ /* Save Power control register */
+ asm ("mrc p15, 0, %0, c15, c0, 0"
+ : "=r" (tmp) : : "cc");
+ save_arm_register[0] = tmp;
+
+ /* Save Diagnostic register */
+ asm ("mrc p15, 0, %0, c15, c0, 1"
+ : "=r" (tmp) : : "cc");
+ save_arm_register[1] = tmp;
}
- /* Save Power control register */
- asm ("mrc p15, 0, %0, c15, c0, 0"
- : "=r" (tmp) : : "cc");
- save_arm_register[0] = tmp;
-
- /* Save Diagnostic register */
- asm ("mrc p15, 0, %0, c15, c0, 1"
- : "=r" (tmp) : : "cc");
- save_arm_register[1] = tmp;
-
return 0;
}
-static void exynos4_pm_resume(void)
+static void exynos_pm_resume(void)
{
unsigned long tmp;
@@ -350,17 +277,19 @@
/* No need to perform below restore code */
goto early_wakeup;
}
- /* Restore Power control register */
- tmp = save_arm_register[0];
- asm volatile ("mcr p15, 0, %0, c15, c0, 0"
- : : "r" (tmp)
- : "cc");
+ if (!soc_is_exynos5250()) {
+ /* Restore Power control register */
+ tmp = save_arm_register[0];
+ asm volatile ("mcr p15, 0, %0, c15, c0, 0"
+ : : "r" (tmp)
+ : "cc");
- /* Restore Diagnostic register */
- tmp = save_arm_register[1];
- asm volatile ("mcr p15, 0, %0, c15, c0, 1"
- : : "r" (tmp)
- : "cc");
+ /* Restore Diagnostic register */
+ tmp = save_arm_register[1];
+ asm volatile ("mcr p15, 0, %0, c15, c0, 1"
+ : : "r" (tmp)
+ : "cc");
+ }
/* For release retention */
@@ -372,26 +301,28 @@
__raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
__raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
- s3c_pm_do_restore_core(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
+ s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- exynos4_restore_pll();
+ if (!soc_is_exynos5250()) {
+ exynos4_restore_pll();
#ifdef CONFIG_SMP
- scu_enable(S5P_VA_SCU);
+ scu_enable(S5P_VA_SCU);
#endif
+ }
early_wakeup:
return;
}
-static struct syscore_ops exynos4_pm_syscore_ops = {
- .suspend = exynos4_pm_suspend,
- .resume = exynos4_pm_resume,
+static struct syscore_ops exynos_pm_syscore_ops = {
+ .suspend = exynos_pm_suspend,
+ .resume = exynos_pm_resume,
};
-static __init int exynos4_pm_syscore_init(void)
+static __init int exynos_pm_syscore_init(void)
{
- register_syscore_ops(&exynos4_pm_syscore_ops);
+ register_syscore_ops(&exynos_pm_syscore_ops);
return 0;
}
-arch_initcall(exynos4_pm_syscore_init);
+arch_initcall(exynos_pm_syscore_init);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index 77c6815..4aacb66 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pmu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * EXYNOS4210 - CPU PMU(Power Management Unit) support
+ * EXYNOS - CPU PMU(Power Management Unit) support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,13 +11,14 @@
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/bug.h>
#include <mach/regs-clock.h>
#include <mach/pmu.h>
-static struct exynos4_pmu_conf *exynos4_pmu_config;
+static struct exynos_pmu_conf *exynos_pmu_config;
-static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
+static struct exynos_pmu_conf exynos4210_pmu_config[] = {
/* { .reg = address, .val = { AFTR, LPA, SLEEP } */
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
@@ -94,7 +94,7 @@
{ PMU_TABLE_END,},
};
-static struct exynos4_pmu_conf exynos4x12_pmu_config[] = {
+static struct exynos_pmu_conf exynos4x12_pmu_config[] = {
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -202,7 +202,7 @@
{ PMU_TABLE_END,},
};
-static struct exynos4_pmu_conf exynos4412_pmu_config[] = {
+static struct exynos_pmu_conf exynos4412_pmu_config[] = {
{ S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
@@ -212,13 +212,174 @@
{ PMU_TABLE_END,},
};
-void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
+static struct exynos_pmu_conf exynos5250_pmu_config[] = {
+ /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_FSYS_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_USBOTG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_G2D_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_USBDRD_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SDMMC_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_CSSYS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SECSS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_HSI_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SATA_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+void __iomem *exynos5_list_both_cnt_feed[] = {
+ EXYNOS5_ARM_CORE0_OPTION,
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_ARM_COMMON_OPTION,
+ EXYNOS5_GSCL_OPTION,
+ EXYNOS5_ISP_OPTION,
+ EXYNOS5_MFC_OPTION,
+ EXYNOS5_G3D_OPTION,
+ EXYNOS5_DISP1_OPTION,
+ EXYNOS5_MAU_OPTION,
+ EXYNOS5_TOP_PWR_OPTION,
+ EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+void __iomem *exynos5_list_diable_wfi_wfe[] = {
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_FSYS_ARM_OPTION,
+ EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5_init_pmu(void)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+ /*
+ * Enable both SC_FEEDBACK and SC_COUNTER
+ */
+ for (i = 0 ; i < ARRAY_SIZE(exynos5_list_both_cnt_feed) ; i++) {
+ tmp = __raw_readl(exynos5_list_both_cnt_feed[i]);
+ tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+ EXYNOS5_USE_SC_COUNTER);
+ __raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+ }
+
+ /*
+ * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
+ * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
+ */
+ tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+ tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
+ EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+ __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+ /*
+ * Disable WFI/WFE on XXX_OPTION
+ */
+ for (i = 0 ; i < ARRAY_SIZE(exynos5_list_diable_wfi_wfe) ; i++) {
+ tmp = __raw_readl(exynos5_list_diable_wfi_wfe[i]);
+ tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+ EXYNOS5_OPTION_USE_STANDBYWFI);
+ __raw_writel(tmp, exynos5_list_diable_wfi_wfe[i]);
+ }
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
unsigned int i;
- for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
- __raw_writel(exynos4_pmu_config[i].val[mode],
- exynos4_pmu_config[i].reg);
+ if (soc_is_exynos5250())
+ exynos5_init_pmu();
+
+ for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++)
+ __raw_writel(exynos_pmu_config[i].val[mode],
+ exynos_pmu_config[i].reg);
if (soc_is_exynos4412()) {
for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
@@ -227,20 +388,23 @@
}
}
-static int __init exynos4_pmu_init(void)
+static int __init exynos_pmu_init(void)
{
- exynos4_pmu_config = exynos4210_pmu_config;
+ exynos_pmu_config = exynos4210_pmu_config;
if (soc_is_exynos4210()) {
- exynos4_pmu_config = exynos4210_pmu_config;
+ exynos_pmu_config = exynos4210_pmu_config;
pr_info("EXYNOS4210 PMU Initialize\n");
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
- exynos4_pmu_config = exynos4x12_pmu_config;
+ exynos_pmu_config = exynos4x12_pmu_config;
pr_info("EXYNOS4x12 PMU Initialize\n");
+ } else if (soc_is_exynos5250()) {
+ exynos_pmu_config = exynos5250_pmu_config;
+ pr_info("EXYNOS5250 PMU Initialize\n");
} else {
- pr_info("EXYNOS4: PMU not supported\n");
+ pr_info("EXYNOS: PMU not supported\n");
}
return 0;
}
-arch_initcall(exynos4_pmu_init);
+arch_initcall(exynos_pmu_init);
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index ebbd7fc..a9f80943 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -28,6 +28,7 @@
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/export.h>
+#include <linux/gpio.h>
#include <mach/udc.h>
#include <mach/hardware.h>
@@ -107,7 +108,7 @@
7, 8, 9, 10, 11, 12, -1, -1,
};
-int gpio_to_irq(int gpio)
+static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
int irq;
@@ -117,7 +118,6 @@
}
return -EINVAL;
}
-EXPORT_SYMBOL(gpio_to_irq);
int irq_to_gpio(unsigned int irq)
{
@@ -383,12 +383,56 @@
unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);
+static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ gpio_line_config(gpio, IXP4XX_GPIO_IN);
+
+ return 0;
+}
+
+static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int level)
+{
+ gpio_line_set(gpio, level);
+ gpio_line_config(gpio, IXP4XX_GPIO_OUT);
+
+ return 0;
+}
+
+static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ int value;
+
+ gpio_line_get(gpio, &value);
+
+ return value;
+}
+
+static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ gpio_line_set(gpio, value);
+}
+
+static struct gpio_chip ixp4xx_gpio_chip = {
+ .label = "IXP4XX_GPIO_CHIP",
+ .direction_input = ixp4xx_gpio_direction_input,
+ .direction_output = ixp4xx_gpio_direction_output,
+ .get = ixp4xx_gpio_get_value,
+ .set = ixp4xx_gpio_set_value,
+ .to_irq = ixp4xx_gpio_to_irq,
+ .base = 0,
+ .ngpio = 16,
+};
+
void __init ixp4xx_sys_init(void)
{
ixp4xx_exp_bus_size = SZ_16M;
platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
+ gpiochip_add(&ixp4xx_gpio_chip);
+
if (cpu_is_ixp46x()) {
int region;
diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
index 83d6b4e..ef37f26 100644
--- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
+++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
@@ -1,79 +1,2 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/gpio.h
- *
- * IXP4XX GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
- * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_IXP4XX_GPIO_H
-#define __ASM_ARCH_IXP4XX_GPIO_H
-
-#include <linux/kernel.h>
-#include <mach/hardware.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
- return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
- might_sleep();
-
- return;
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- gpio_line_config(gpio, IXP4XX_GPIO_IN);
- return 0;
-}
-
-static inline int gpio_direction_output(unsigned gpio, int level)
-{
- gpio_line_set(gpio, level);
- gpio_line_config(gpio, IXP4XX_GPIO_OUT);
- return 0;
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
- int value;
-
- gpio_line_get(gpio, &value);
-
- return value;
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- gpio_line_set(gpio, value);
-}
-
-#include <asm-generic/gpio.h> /* cansleep wrappers */
-
-extern int gpio_to_irq(int gpio);
-#define gpio_to_irq gpio_to_irq
-extern int irq_to_gpio(unsigned int irq);
-
-#endif
+/* empty */
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index e53b217..b7a9f4d 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -134,6 +134,17 @@
#define IRQ_S32416_WDT S3C2410_IRQSUB(27)
#define IRQ_S32416_AC97 S3C2410_IRQSUB(28)
+/* second interrupt-register of s3c2416/s3c2450 */
+
+#define S3C2416_IRQ(x) S3C2410_IRQ((x) + 54 + 29)
+#define IRQ_S3C2416_2D S3C2416_IRQ(0)
+#define IRQ_S3C2416_IIC1 S3C2416_IRQ(1)
+#define IRQ_S3C2416_RESERVED2 S3C2416_IRQ(2)
+#define IRQ_S3C2416_RESERVED3 S3C2416_IRQ(3)
+#define IRQ_S3C2416_PCM0 S3C2416_IRQ(4)
+#define IRQ_S3C2416_PCM1 S3C2416_IRQ(5)
+#define IRQ_S3C2416_I2S0 S3C2416_IRQ(6)
+#define IRQ_S3C2416_I2S1 S3C2416_IRQ(7)
/* extra irqs for s3c2440 */
@@ -175,7 +186,9 @@
#define IRQ_S3C2443_WDT S3C2410_IRQSUB(27)
#define IRQ_S3C2443_AC97 S3C2410_IRQSUB(28)
-#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#if defined(CONFIG_CPU_S3C2416)
+#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
#define NR_IRQS (IRQ_S3C2440_AC97+1)
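
Worked example of the numbering above (derived only from the macros in this hunk): S3C2416_IRQ(x) expands to S3C2410_IRQ((x) + 83), so the second interrupt register occupies S3C2410_IRQ(83) (IRQ_S3C2416_2D) through S3C2410_IRQ(90) (IRQ_S3C2416_I2S1), and with CONFIG_CPU_S3C2416 the new NR_IRQS becomes IRQ_S3C2416_I2S1 + 1, i.e. S3C2410_IRQ(90) + 1.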
diff --git a/arch/arm/mach-s3c24xx/irq-s3c2416.c b/arch/arm/mach-s3c24xx/irq-s3c2416.c
index fd49f35..23ec973 100644
--- a/arch/arm/mach-s3c24xx/irq-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/irq-s3c2416.c
@@ -27,6 +27,7 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/syscore_ops.h>
#include <mach/hardware.h>
#include <asm/irq.h>
@@ -192,6 +193,43 @@
.irq_ack = s3c2416_irq_uart3_ack,
};
+/* second interrupt register */
+
+static inline void s3c2416_irq_ack_second(struct irq_data *data)
+{
+ unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+
+ __raw_writel(bitval, S3C2416_SRCPND2);
+ __raw_writel(bitval, S3C2416_INTPND2);
+}
+
+static void s3c2416_irq_mask_second(struct irq_data *data)
+{
+ unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+ unsigned long mask;
+
+ mask = __raw_readl(S3C2416_INTMSK2);
+ mask |= bitval;
+ __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+static void s3c2416_irq_unmask_second(struct irq_data *data)
+{
+ unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+ unsigned long mask;
+
+ mask = __raw_readl(S3C2416_INTMSK2);
+ mask &= ~bitval;
+ __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+struct irq_chip s3c2416_irq_second = {
+ .irq_ack = s3c2416_irq_ack_second,
+ .irq_mask = s3c2416_irq_mask_second,
+ .irq_unmask = s3c2416_irq_unmask_second,
+};
+
+
/* IRQ initialisation code */
static int __init s3c2416_add_sub(unsigned int base,
@@ -213,6 +251,42 @@
return 0;
}
+static void __init s3c2416_irq_add_second(void)
+{
+ unsigned long pend;
+ unsigned long last;
+ int irqno;
+ int i;
+
+ /* first, clear all interrupts pending... */
+ last = 0;
+ for (i = 0; i < 4; i++) {
+ pend = __raw_readl(S3C2416_INTPND2);
+
+ if (pend == 0 || pend == last)
+ break;
+
+ __raw_writel(pend, S3C2416_SRCPND2);
+ __raw_writel(pend, S3C2416_INTPND2);
+ printk(KERN_INFO "irq: clearing pending status %08x\n",
+ (int)pend);
+ last = pend;
+ }
+
+ for (irqno = IRQ_S3C2416_2D; irqno <= IRQ_S3C2416_I2S1; irqno++) {
+ switch (irqno) {
+ case IRQ_S3C2416_RESERVED2:
+ case IRQ_S3C2416_RESERVED3:
+ /* no IRQ here */
+ break;
+ default:
+ irq_set_chip_and_handler(irqno, &s3c2416_irq_second,
+ handle_edge_irq);
+ set_irq_flags(irqno, IRQF_VALID);
+ }
+ }
+}
+
static int __init s3c2416_irq_add(struct device *dev,
struct subsys_interface *sif)
{
@@ -232,6 +306,8 @@
&s3c2416_irq_wdtac97,
IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
+ s3c2416_irq_add_second();
+
return 0;
}
@@ -248,3 +324,25 @@
arch_initcall(s3c2416_irq_init);
+#ifdef CONFIG_PM
+static struct sleep_save irq_save[] = {
+ SAVE_ITEM(S3C2416_INTMSK2),
+};
+
+int s3c2416_irq_suspend(void)
+{
+ s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
+
+ return 0;
+}
+
+void s3c2416_irq_resume(void)
+{
+ s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
+}
+
+struct syscore_ops s3c2416_irq_syscore_ops = {
+ .suspend = s3c2416_irq_suspend,
+ .resume = s3c2416_irq_resume,
+};
+#endif
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c
index 7743fad..ed5a95ec 100644
--- a/arch/arm/mach-s3c24xx/s3c2416.c
+++ b/arch/arm/mach-s3c24xx/s3c2416.c
@@ -106,6 +106,7 @@
register_syscore_ops(&s3c2416_pm_syscore_ops);
#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
+ register_syscore_ops(&s3c2416_irq_syscore_ops);
return device_register(&s3c2416_dev);
}
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index 179460f..acb197c 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -27,12 +27,7 @@
struct cpuidle_driver *drv,
int index)
{
- struct timeval before, after;
unsigned long tmp;
- int idle_time;
-
- local_irq_disable();
- do_gettimeofday(&before);
/* Setup PWRCFG to enter idle mode */
tmp = __raw_readl(S3C64XX_PWR_CFG);
@@ -42,42 +37,32 @@
cpu_do_idle();
- do_gettimeofday(&after);
- local_irq_enable();
- idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
- (after.tv_usec - before.tv_usec);
-
- dev->last_residency = idle_time;
return index;
}
-static struct cpuidle_state s3c64xx_cpuidle_set[] = {
- [0] = {
- .enter = s3c64xx_enter_idle,
- .exit_latency = 1,
- .target_residency = 1,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .name = "IDLE",
- .desc = "System active, ARM gated",
- },
-};
+static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
static struct cpuidle_driver s3c64xx_cpuidle_driver = {
- .name = "s3c64xx_cpuidle",
- .owner = THIS_MODULE,
- .state_count = ARRAY_SIZE(s3c64xx_cpuidle_set),
-};
-
-static struct cpuidle_device s3c64xx_cpuidle_device = {
- .state_count = ARRAY_SIZE(s3c64xx_cpuidle_set),
+ .name = "s3c64xx_cpuidle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states = {
+ {
+ .enter = s3c64xx_enter_idle,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "IDLE",
+ .desc = "System active, ARM gated",
+ },
+ },
+ .state_count = 1,
};
static int __init s3c64xx_init_cpuidle(void)
{
int ret;
- memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
- sizeof(s3c64xx_cpuidle_set));
cpuidle_register_driver(&s3c64xx_cpuidle_driver);
ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 0ace108..7a27f56 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -182,6 +182,11 @@
},
};
+static const struct i2c_board_info wm6230_i2c_devs[] = {
+ { I2C_BOARD_INFO("wm9081", 0x6c),
+ .platform_data = &wm9081_pdata, },
+};
+
static __devinitdata const struct {
u8 id;
const char *name;
@@ -195,7 +200,9 @@
{ .id = 0x03, .name = "1252-EV1 Glenlivet" },
{ .id = 0x11, .name = "6249-EV2 Glenfarclas", },
{ .id = 0x14, .name = "6271-EV1 Lochnagar" },
- { .id = 0x15, .name = "XXXX-EV1 Bells" },
+ { .id = 0x15, .name = "6320-EV1 Bells",
+ .i2c_devs = wm6230_i2c_devs,
+ .num_i2c_devs = ARRAY_SIZE(wm6230_i2c_devs) },
{ .id = 0x21, .name = "1275-EV1 Mortlach" },
{ .id = 0x25, .name = "1274-EV1 Glencadam" },
{ .id = 0x31, .name = "1253-EV1 Tomatin",
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index eda5e02..6b20a71 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -671,6 +671,7 @@
.irq = S3C_EINT(0),
.platform_data = &glenfarclas_pmic_pdata },
+ { I2C_BOARD_INFO("wlf-gf-module", 0x22) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x24) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x25) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x26) },
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 04dd092..fde26ad 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -14,7 +14,6 @@
#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
-#include <linux/device.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
@@ -31,7 +30,6 @@
#include <asm/hardware/gic.h>
#include <asm/hardware/timer-sp.h>
#include <asm/hardware/sp810.h>
-#include <asm/hardware/gic.h>
#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>
diff --git a/arch/arm/plat-samsung/include/plat/s3c2416.h b/arch/arm/plat-samsung/include/plat/s3c2416.h
index de2b5bd..7178e33 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2416.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2416.h
@@ -24,6 +24,9 @@
extern int s3c2416_baseclk_add(void);
extern void s3c2416_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c2416_irq_syscore_ops;
+
#else
#define s3c2416_init_clocks NULL
#define s3c2416_init_uarts NULL
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 83bd051..e74ff13 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -41,7 +41,6 @@
def_bool !64BIT
select GENERIC_ATOMIC64
select CLZ_TAB
- select ARCH_USES_GETTIMEOFFSET
config SPARC64
def_bool 64BIT
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index cbb93e5..61ebe74 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -40,11 +40,7 @@
#define ASI_M_UNA01 0x01 /* Same here... */
#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
-#ifndef CONFIG_SPARC_LEON
#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
-#else
-#define ASI_M_MMUREGS 0x19
-#endif /* CONFIG_SPARC_LEON */
#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
diff --git a/arch/sparc/include/asm/asmmacro.h b/arch/sparc/include/asm/asmmacro.h
index 02a172f..a0e28ef 100644
--- a/arch/sparc/include/asm/asmmacro.h
+++ b/arch/sparc/include/asm/asmmacro.h
@@ -20,4 +20,26 @@
/* All traps low-level code here must end with this macro. */
#define RESTORE_ALL b ret_trap_entry; clr %l6;
+/* Support for run-time patching of single instructions.
+ * This is used to handle the differences in the ASI for
+ * MMUREGS for LEON and SUN.
+ *
+ * Sample:
+ * LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
+ * SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
+ * PI == Patch Instruction
+ *
+ * For LEON we will use the first variant,
+ * and for all others we will use the SUN variant.
+ * The order is important.
+ */
+#define LEON_PI(...) \
+662: __VA_ARGS__
+
+#define SUN_PI_(...) \
+ .section .leon_1insn_patch, "ax"; \
+ .word 662b; \
+ __VA_ARGS__; \
+ .previous
+
#endif /* !(_SPARC_ASMMACRO_H) */
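
A minimal sketch of how the pair is consumed (illustrative only): LEON_PI() emits the LEON instruction inline, while SUN_PI_() records the instruction's address together with the SUN replacement in the .leon_1insn_patch section. The helper below, whose name apply_1insn_patches() is hypothetical, walks such a table of {addr, insn} pairs on non-LEON machines; it mirrors the leon_patch() routine added to arch/sparc/kernel/setup_32.c later in this diff, which also defines the entry layout and uses flushi().

struct leon_1insn_patch_entry {
	unsigned int addr;	/* address of the LEON_PI() instruction */
	unsigned int insn;	/* SUN replacement recorded by SUN_PI_() */
};

/* Overwrite each LEON instruction with its SUN variant and flush the
 * I-cache so the patched instruction is fetched on the next execution.
 */
static void apply_1insn_patches(struct leon_1insn_patch_entry *p,
				struct leon_1insn_patch_entry *end)
{
	while (p < end) {
		*(unsigned int *)(unsigned long)p->addr = p->insn;
		flushi(p->addr);
		p++;
	}
}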
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 48a7c65..8493fd3 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,13 +12,18 @@
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops *leon_dma_ops;
+extern struct dma_map_ops pci32_dma_ops;
+
extern struct bus_type pci_bus_type;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
- if (dev->bus == &pci_bus_type)
+ if (sparc_cpu_model == sparc_leon)
+ return leon_dma_ops;
+ else if (dev->bus == &pci_bus_type)
return &pci32_dma_ops;
#endif
return dma_ops;
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 0765912..3375c62 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -8,8 +8,6 @@
#ifndef LEON_H_INCLUDE
#define LEON_H_INCLUDE
-#ifdef CONFIG_SPARC_LEON
-
/* mmu register access, ASI_LEON_MMUREGS */
#define LEON_CNR_CTRL 0x000
#define LEON_CNR_CTXP 0x100
@@ -62,15 +60,6 @@
#ifndef __ASSEMBLY__
-/* do a virtual address read without cache */
-static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
-{
- unsigned long retval;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
- return retval;
-}
-
/* do a physical address bypass write, i.e. for 0x80000000 */
static inline void leon_store_reg(unsigned long paddr, unsigned long value)
{
@@ -87,47 +76,16 @@
return retval;
}
-static inline void leon_srmmu_disabletlb(void)
-{
- unsigned int retval;
- __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
- "i"(ASI_LEON_MMUREGS));
- retval |= LEON_CNR_CTRL_TLBDIS;
- __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
- "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
-static inline void leon_srmmu_enabletlb(void)
-{
- unsigned int retval;
- __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
- "i"(ASI_LEON_MMUREGS));
- retval = retval & ~LEON_CNR_CTRL_TLBDIS;
- __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
- "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
/* macro access for leon_load_reg() and leon_store_reg() */
#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x)))
#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
-#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
-#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
-#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS)
-#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
-#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
-#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
-
-/* macro access for leon_readnobuffer_reg() */
-#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
extern void leon_init(void);
extern void leon_switch_mm(void);
extern void leon_init_IRQ(void);
-extern unsigned long last_valid_pfn;
-
static inline unsigned long sparc_leon3_get_dcachecfg(void)
{
unsigned int retval;
@@ -230,9 +188,6 @@
#error cannot determine LEON_PAGE_SIZE_LEON
#endif
-#define PAGE_MIN_SHIFT (12)
-#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT)
-
#define LEON3_XCCR_SETS_MASK 0x07000000UL
#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
@@ -242,7 +197,7 @@
#ifndef __ASSEMBLY__
struct vm_area_struct;
-extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_cache_all(void);
@@ -258,15 +213,7 @@
unsigned long dccr; /* 0x0c - Data Cache Configuration Register */
};
-/* struct that hold LEON2 cache configuration register
- * & configuration register
- */
-struct leon2_cacheregs {
- unsigned long ccr, cfg;
-};
-
-#ifdef __KERNEL__
-
+#include <linux/irq.h>
#include <linux/interrupt.h>
struct device_node;
@@ -292,24 +239,15 @@
extern void leon_boot_cpus(void);
extern int leon_boot_one_cpu(int i, struct task_struct *);
void leon_init_smp(void);
-extern void cpu_idle(void);
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-extern int __leon_processor_id(void);
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
-extern unsigned int real_irq_entry[];
extern unsigned int smpleon_ipi[];
-extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
-extern unsigned int linux_trap_ipi15_sun4m[];
+extern unsigned int linux_trap_ipi15_leon[];
extern int leon_ipi_irq;
#endif /* CONFIG_SMP */
-#endif /* __KERNEL__ */
-
#endif /* __ASSEMBLY__ */
/* macros used in leon_mm.c */
@@ -317,18 +255,4 @@
#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
#define _SRMMU_PTE_PMASK_LEON 0xffffffff
-#else /* defined(CONFIG_SPARC_LEON) */
-
-/* nop definitions for !LEON case */
-#define leon_init() do {} while (0)
-#define leon_switch_mm() do {} while (0)
-#define leon_init_IRQ() do {} while (0)
-#define init_leon() do {} while (0)
-#define leon_smp_done() do {} while (0)
-#define leon_boot_cpus() do {} while (0)
-#define leon_boot_one_cpu(i, t) 1
-#define leon_init_smp() do {} while (0)
-
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
#endif
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index e50f326..f3034ed 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -87,8 +87,6 @@
#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
-#ifdef CONFIG_SPARC_LEON
-
#ifndef __ASSEMBLY__
struct leon3_irqctrl_regs_map {
@@ -264,6 +262,4 @@
#define amba_device(x) (((x) >> 12) & 0xfff)
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
#endif
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index cb82870..79da178 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -139,6 +139,7 @@
restore %g0, %g0, %g0;
#ifndef __ASSEMBLY__
+extern unsigned long last_valid_pfn;
/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin. FIXME. -KMW */
@@ -148,67 +149,13 @@
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
/* Accessing the MMU control register. */
-static inline unsigned int srmmu_get_mmureg(void)
-{
- unsigned int retval;
- __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
- "=r" (retval) :
- "i" (ASI_M_MMUREGS));
- return retval;
-}
-
-static inline void srmmu_set_mmureg(unsigned long regval)
-{
- __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
- "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
-
-}
-
-static inline void srmmu_set_ctable_ptr(unsigned long paddr)
-{
- paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
- "i" (ASI_M_MMUREGS) :
- "memory");
-}
-
-static inline void srmmu_set_context(int context)
-{
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (context), "r" (SRMMU_CTX_REG),
- "i" (ASI_M_MMUREGS) : "memory");
-}
-
-static inline int srmmu_get_context(void)
-{
- register int retval;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (SRMMU_CTX_REG),
- "i" (ASI_M_MMUREGS));
- return retval;
-}
-
-static inline unsigned int srmmu_get_fstatus(void)
-{
- unsigned int retval;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
- return retval;
-}
-
-static inline unsigned int srmmu_get_faddr(void)
-{
- unsigned int retval;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
- return retval;
-}
+unsigned int srmmu_get_mmureg(void);
+void srmmu_set_mmureg(unsigned long regval);
+void srmmu_set_ctable_ptr(unsigned long paddr);
+void srmmu_set_context(int context);
+int srmmu_get_context(void);
+unsigned int srmmu_get_fstatus(void);
+unsigned int srmmu_get_faddr(void);
/* This is guaranteed on all SRMMU's. */
static inline void srmmu_flush_whole_tlb(void)
@@ -219,23 +166,6 @@
}
-/* These flush types are not available on all chips... */
-#ifndef CONFIG_SPARC_LEON
-static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
-{
- unsigned long retval;
-
- vaddr &= PAGE_MASK;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
-
- return retval;
-}
-#else
-#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
-#endif
-
static inline int
srmmu_get_pte (unsigned long addr)
{
diff --git a/arch/sparc/include/asm/psr.h b/arch/sparc/include/asm/psr.h
index b8c0e5f..cee7ed9 100644
--- a/arch/sparc/include/asm/psr.h
+++ b/arch/sparc/include/asm/psr.h
@@ -35,6 +35,14 @@
#define PSR_VERS 0x0f000000 /* cpu-version field */
#define PSR_IMPL 0xf0000000 /* cpu-implementation field */
+#define PSR_VERS_SHIFT 24
+#define PSR_IMPL_SHIFT 28
+#define PSR_VERS_SHIFTED_MASK 0xf
+#define PSR_IMPL_SHIFTED_MASK 0xf
+
+#define PSR_IMPL_TI 0x4
+#define PSR_IMPL_LEON 0xf
+
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/sections.h b/arch/sparc/include/asm/sections.h
index 0b0553b..f300d1a 100644
--- a/arch/sparc/include/asm/sections.h
+++ b/arch/sparc/include/asm/sections.h
@@ -7,4 +7,7 @@
/* sparc entry point */
extern char _start[];
+extern char __leon_1insn_patch[];
+extern char __leon_1insn_patch_end[];
+
#endif
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 72308f9..6cf591b 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -51,8 +51,8 @@
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
-obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
-obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
+obj-$(CONFIG_SPARC32) += leon_kernel.o
+obj-$(CONFIG_SPARC32) += leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 2d18196..a6c94a2 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -121,7 +121,7 @@
FPU(-1, NULL)
}
},{
- 4,
+ PSR_IMPL_TI,
.cpu_info = {
CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
/* SparcClassic -- borned STP1010TAB-50*/
@@ -191,7 +191,7 @@
FPU(-1, NULL)
}
},{
- 0xF, /* Aeroflex Gaisler */
+ PSR_IMPL_LEON, /* Aeroflex Gaisler */
.cpu_info = {
CPU(3, "LEON"),
CPU(-1, NULL)
@@ -440,16 +440,16 @@
int psr_impl, psr_vers, fpu_vers;
int psr;
- psr_impl = ((get_psr() >> 28) & 0xf);
- psr_vers = ((get_psr() >> 24) & 0xf);
+ psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
+ psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
psr = get_psr();
put_psr(psr | PSR_EF);
-#ifdef CONFIG_SPARC_LEON
- fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
-#else
- fpu_vers = ((get_fsr() >> 17) & 0x7);
-#endif
+
+ if (psr_impl == PSR_IMPL_LEON)
+ fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
+ else
+ fpu_vers = ((get_fsr() >> 17) & 0x7);
put_psr(psr);
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 2dbe180..dcaa1cf 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -393,7 +393,6 @@
/* FIXME */
1: b,a 1b
-#ifdef CONFIG_SPARC_LEON
.globl smpleon_ipi
.extern leon_ipi_interrupt
/* SMP per-cpu IPI interrupts are handled specially. */
@@ -424,8 +423,6 @@
b ret_trap_lockless_ipi
clr %l6
-#endif /* CONFIG_SPARC_LEON */
-
#endif /* CONFIG_SMP */
/* This routine handles illegal instructions and privileged
@@ -770,8 +767,11 @@
mov 0x400, %l5
mov 0x300, %l4
- lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
- lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last
+LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6) ! read sfar first
+SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6) ! read sfar first
+
+LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5) ! read sfsr last
+SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last
andn %l6, 0xfff, %l6
srl %l5, 6, %l5 ! and encode all info into l7
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
index 84b5f0d..e3e80d6 100644
--- a/arch/sparc/kernel/etrap_32.S
+++ b/arch/sparc/kernel/etrap_32.S
@@ -234,7 +234,8 @@
cmp %glob_tmp, %sp
bleu,a 1f
- lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
+SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
trap_setup_user_stack_is_bolixed:
/* From user/kernel into invalid window w/bad user
@@ -249,18 +250,25 @@
1:
/* Clear the fault status and turn on the no_fault bit. */
or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
- sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
/* Dump the registers and cross fingers. */
STORE_WINDOW(sp)
/* Clear the no_fault bit and check the status. */
andn %glob_tmp, 0x2, %glob_tmp
- sta %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
+
mov AC_M_SFAR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
+
mov AC_M_SFSR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
+
andcc %glob_tmp, 0x2, %g0 ! did we fault?
bne trap_setup_user_stack_is_bolixed ! failure
nop
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index a0f5c20..afeb1d7 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -30,10 +30,6 @@
* the cpu-type
*/
.align 4
-cputyp:
- .word 1
-
- .align 4
.globl cputypval
cputypval:
.asciz "sun4m"
@@ -46,8 +42,8 @@
.align 4
-sun4c_notsup:
- .asciz "Sparc-Linux sun4/sun4c support does no longer exist.\n\n"
+notsup:
+ .asciz "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
.align 4
sun4e_notsup:
@@ -123,7 +119,7 @@
tst %o0
be no_sun4u_here
mov %g4, %o7 /* Previous %o7. */
-
+
mov %o0, %l0 ! stash away romvec
mov %o0, %g7 ! put it here too
mov %o1, %l1 ! stash away debug_vec too
@@ -132,7 +128,7 @@
set current_pc, %g5
cmp %g3, %g5
be already_mapped
- nop
+ nop
/* %l6 will hold the offset we have to subtract
* from absolute symbols in order to access areas
@@ -192,9 +188,9 @@
bne not_a_sun4
nop
-halt_sun4_or_sun4c:
+halt_notsup:
ld [%g7 + 0x68], %o1
- set sun4c_notsup, %o0
+ set notsup, %o0
sub %o0, %l6, %o0
call %o1
nop
@@ -202,18 +198,31 @@
nop
not_a_sun4:
- lda [%g0] ASI_M_MMUREGS, %g1
- andcc %g1, 1, %g0
- be halt_sun4_or_sun4c
+ /* It looks like this is a machine we support.
+ * Now find out what MMU we are dealing with
+ * LEON - identified by the psr.impl field
+ * Viking - identified by the psr.impl field
+ * In all other cases a sun4m srmmu.
+ * We check that the MMU is enabled in all cases.
+ */
+
+ /* Check if this is a LEON CPU */
+ rd %psr, %g3
+ srl %g3, PSR_IMPL_SHIFT, %g3
+ and %g3, PSR_IMPL_SHIFTED_MASK, %g3
+ cmp %g3, PSR_IMPL_LEON
+ be leon_remap /* It is a LEON - jump */
nop
-srmmu_remap:
- /* First, check for a viking (TI) module. */
- set 0x40000000, %g2
- rd %psr, %g3
- and %g2, %g3, %g3
- subcc %g3, 0x0, %g0
- bz srmmu_nviking
+ /* Sanity-check, is MMU enabled */
+ lda [%g0] ASI_M_MMUREGS, %g1
+ andcc %g1, 1, %g0
+ be halt_notsup
+ nop
+
+ /* Check for a viking (TI) module. */
+ cmp %g3, PSR_IMPL_TI
+ bne srmmu_not_viking
nop
/* Figure out what kind of viking we are on.
@@ -228,14 +237,14 @@
lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg
and %g2, %g3, %g3
subcc %g3, 0x0, %g0
- bnz srmmu_nviking ! is in mbus mode
+ bnz srmmu_not_viking ! is in mbus mode
nop
-
+
rd %psr, %g3 ! DO NOT TOUCH %g3
andn %g3, PSR_ET, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
-
+
/* Get context table pointer, then convert to
* a physical address, which is 36 bits.
*/
@@ -258,12 +267,12 @@
lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr
srl %o1, 0x4, %o1 ! Clear low 4 bits
sll %o1, 0x8, %o1 ! Make physical
-
+
/* Ok, pull in the PTD. */
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */
- add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
+ add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS
@@ -293,12 +302,12 @@
b go_to_highmem
nop
+srmmu_not_viking:
/* This works on viking's in Mbus mode and all
* other MBUS modules. It is virtually the same as
* the above madness sans turning traps off and flipping
* the AC bit.
*/
-srmmu_nviking:
set AC_M_CTPR, %g1
lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr
sll %g1, 0x4, %g1 ! make physical addr
@@ -313,6 +322,29 @@
nop ! wheee....
+leon_remap:
+ /* Sanity-check, is MMU enabled */
+ lda [%g0] ASI_LEON_MMUREGS, %g1
+ andcc %g1, 1, %g0
+ be halt_notsup
+ nop
+
+ /* Same code as in the srmmu_not_viking case,
+ * with the LEON ASI for mmuregs
+ */
+ set AC_M_CTPR, %g1
+ lda [%g1] ASI_LEON_MMUREGS, %g1 ! get ctx table ptr
+ sll %g1, 0x4, %g1 ! make physical addr
+ lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
+ srl %g1, 0x4, %g1
+ sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
+
+ lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
+ add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
+ b go_to_highmem
+ nop ! wheee....
+
/* Now do a non-relative jump so that PC is in high-memory */
go_to_highmem:
set execute_in_high_mem, %g1
@@ -336,8 +368,9 @@
sethi %hi(linux_dbvec), %g1
st %o1, [%g1 + %lo(linux_dbvec)]
-/* Get the machine type via the mysterious romvec node operations. */
-
+ /* Get the machine type via the romvec
+ * getprops node operation
+ */
add %g7, 0x1c, %l1
ld [%l1], %l0
ld [%l0], %l0
@@ -356,9 +389,42 @@
! to a buf where above string
! will get stored by the prom.
-#ifdef CONFIG_SPARC_LEON
- /* no cpu-type check is needed, it is a SPARC-LEON */
+ /* Check value of "compatible" property.
+ * "value" => "model"
+ * leon => sparc_leon
+ * sun4m => sun4m
+ * sun4s => sun4m
+ * sun4d => sun4d
+ * sun4e => "no_sun4e_here"
+ * '*' => "no_sun4u_here"
+ * Check single letters only
+ */
+
+ set cputypval, %o2
+ /* If cputypval[0] == 'l' (lower case letter L) this is leon */
+ ldub [%o2], %l1
+ cmp %l1, 'l'
+ be leon_init
+ nop
+
+ /* Check cputypval[4] to find the sun model */
+ ldub [%o2 + 0x4], %l1
+
+ cmp %l1, 'm'
+ be sun4m_init
+ cmp %l1, 's'
+ be sun4m_init
+ cmp %l1, 'd'
+ be sun4d_init
+ cmp %l1, 'e'
+ be no_sun4e_here ! Could be a sun4e.
+ nop
+ b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
+ nop
+
+leon_init:
+ /* LEON CPU - set boot_cpu_id */
sethi %hi(boot_cpu_id), %g2 ! boot-cpu index
#ifdef CONFIG_SMP
@@ -376,26 +442,6 @@
ba continue_boot
nop
-#endif
-
-/* Check to cputype. We may be booted on a sun4u (64 bit box),
- * and sun4d needs special treatment.
- */
-
- set cputypval, %o2
- ldub [%o2 + 0x4], %l1
-
- cmp %l1, 'm'
- be sun4m_init
- cmp %l1, 's'
- be sun4m_init
- cmp %l1, 'd'
- be sun4d_init
- cmp %l1, 'e'
- be no_sun4e_here ! Could be a sun4e.
- nop
- b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
- nop
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
@@ -431,9 +477,9 @@
/* This sucks, apparently this makes Vikings call prom panic, will fix later */
2:
rd %psr, %o1
- srl %o1, 28, %o1 ! Get a type of the CPU
+ srl %o1, PSR_IMPL_SHIFT, %o1 ! Get a type of the CPU
- subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC
+ subcc %o1, PSR_IMPL_TI, %g0 ! TI: Viking or MicroSPARC
be continue_boot
nop
@@ -459,10 +505,6 @@
/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
* show-time!
*/
-
- sethi %hi(cputyp), %o0
- st %g4, [%o0 + %lo(cputyp)]
-
/* Turn on Supervisor, EnableFloating, and all the PIL bits.
* Also puts us in register window zero with traps off.
*/
@@ -480,7 +522,7 @@
set __bss_start , %o0 ! First address of BSS
set _end , %o1 ! Last address of BSS
add %o0, 0x1, %o0
-1:
+1:
stb %g0, [%o0]
subcc %o0, %o1, %g0
bl 1b
@@ -546,7 +588,7 @@
set dest, %g2; \
ld [%g5], %g4; \
st %g4, [%g2];
-
+
/* Patch for window spills... */
PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
@@ -597,7 +639,7 @@
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
-2:
+2:
sethi %hi(nwindows), %g4
st %g3, [%g4 + %lo(nwindows)] ! store final value
sub %g3, 0x1, %g3
@@ -617,18 +659,12 @@
wr %g3, PSR_ET, %psr
WRITE_PAUSE
- /* First we call prom_init() to set up PROMLIB, then
- * off to start_kernel().
- */
-
+ /* Call sparc32_start_kernel(struct linux_romvec *rp) */
sethi %hi(prom_vector_p), %g5
ld [%g5 + %lo(prom_vector_p)], %o0
- call prom_init
+ call sparc32_start_kernel
nop
- call start_kernel
- nop
-
/* We should not get here. */
call halt_me
nop
@@ -659,7 +695,7 @@
.asciz "write"
.align 4
sun4u_6:
- .asciz "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
+ .asciz "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
sun4u_6e:
.align 4
sun4u_7:
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index a2846f5..0f094db 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -55,17 +55,13 @@
/* This function must make sure that caches and memory are coherent after DMA
* On LEON systems without cache snooping it flushes the entire D-CACHE.
*/
-#ifndef CONFIG_SPARC_LEON
static inline void dma_make_coherent(unsigned long pa, unsigned long len)
{
+ if (sparc_cpu_model == sparc_leon) {
+ if (!sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
+ }
}
-#else
-static inline void dma_make_coherent(unsigned long pa, unsigned long len)
-{
- if (!sparc_leon3_snooping_enabled())
- leon_flush_dcache_all();
-}
-#endif
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -427,9 +423,6 @@
#endif /* CONFIG_SBUS */
-/* LEON reuses PCI DMA ops */
-#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
-
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
@@ -657,14 +650,11 @@
};
EXPORT_SYMBOL(pci32_dma_ops);
-#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+/* leon re-uses pci32_dma_ops */
+struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+EXPORT_SYMBOL(leon_dma_ops);
-#ifdef CONFIG_SPARC_LEON
-struct dma_map_ops *dma_ops = &pci32_dma_ops;
-#elif defined(CONFIG_SBUS)
struct dma_map_ops *dma_ops = &sbus_dma_ops;
-#endif
-
EXPORT_SYMBOL(dma_ops);
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index ae04914..c145f6f 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -241,9 +241,6 @@
unsigned int cpu_irq;
int err;
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- struct tt_entry *trap_table;
-#endif
err = request_irq(irq, irq_handler, 0, "floppy", NULL);
if (err)
@@ -264,13 +261,18 @@
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- trap_table = &trapbase_cpu1;
- INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu2;
- INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu3;
- INSTANTIATE(trap_table)
+
+#if defined CONFIG_SMP
+ if (sparc_cpu_model != sparc_leon) {
+ struct tt_entry *trap_table;
+
+ trap_table = &trapbase_cpu1;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu2;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu3;
+ INSTANTIATE(trap_table)
+ }
#endif
#undef INSTANTIATE
/*
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index a86372d..291bb5d 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -26,6 +26,9 @@
#endif
#ifdef CONFIG_SPARC32
+/* setup_32.c */
+void sparc32_start_kernel(struct linux_romvec *rp);
+
/* cpu.c */
extern void cpu_probe(void);
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 77c1b91..e34e2c4 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -23,6 +23,7 @@
#include <asm/smp.h>
#include <asm/setup.h>
+#include "kernel.h"
#include "prom.h"
#include "irq.h"
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 519ca92..4e17432 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -7,6 +7,7 @@
#include <linux/pm.h>
#include <asm/leon_amba.h>
+#include <asm/cpu_type.h>
#include <asm/leon.h>
/* List of Systems that need fixup instructions around power-down instruction */
@@ -65,13 +66,15 @@
/* Install LEON Power Down function */
static int __init leon_pmc_install(void)
{
- /* Assign power management IDLE handler */
- if (pmc_leon_need_fixup())
- pm_idle = pmc_leon_idle_fixup;
- else
- pm_idle = pmc_leon_idle;
+ if (sparc_cpu_model == sparc_leon) {
+ /* Assign power management IDLE handler */
+ if (pmc_leon_need_fixup())
+ pm_idle = pmc_leon_idle_fixup;
+ else
+ pm_idle = pmc_leon_idle;
- printk(KERN_INFO "leon: power management initialized\n");
+ printk(KERN_INFO "leon: power management initialized\n");
+ }
return 0;
}
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index a469090..0f3fb6d 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -48,15 +48,13 @@
#include "kernel.h"
-#ifdef CONFIG_SPARC_LEON
-
#include "irq.h"
extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
-void __init leon_configure_cache_smp(void);
+void __cpuinit leon_configure_cache_smp(void);
static void leon_ipi_init(void);
/* IRQ number of LEON IPIs */
@@ -123,7 +121,7 @@
extern struct linux_prom_registers smp_penguin_ctable;
-void __init leon_configure_cache_smp(void)
+void __cpuinit leon_configure_cache_smp(void)
{
unsigned long cfg = sparc_leon3_get_dcachecfg();
int me = smp_processor_id();
@@ -507,5 +505,3 @@
sparc32_ipi_ops = &leon_ipi_ops;
}
-
-#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index fe6787c..cb36e82 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -65,50 +65,25 @@
struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];
-#ifndef CONFIG_SMP
-
/*
* the idle loop on a Sparc... ;)
*/
void cpu_idle(void)
{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
/* endless idle loop with no priority at all */
for (;;) {
- if (pm_idle) {
- while (!need_resched())
+ while (!need_resched()) {
+ if (pm_idle)
(*pm_idle)();
- } else {
- while (!need_resched())
+ else
cpu_relax();
}
schedule_preempt_disabled();
}
}
-#else
-
-/* This is being executed in task 0 'user space'. */
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
- /* endless idle loop with no priority at all */
- while(1) {
-#ifdef CONFIG_SPARC_LEON
- if (pm_idle) {
- while (!need_resched())
- (*pm_idle)();
- } else
-#endif
- {
- while (!need_resched())
- cpu_relax();
- }
- schedule_preempt_disabled();
- }
-}
-
-#endif
-
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
void machine_halt(void)
{
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 741df91..1303021 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -23,7 +23,6 @@
#include <linux/of_pdt.h>
#include <asm/prom.h>
#include <asm/oplib.h>
-#include <asm/leon.h>
#include "prom.h"
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 7abc24e..6c34de0 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -231,11 +231,14 @@
cmp %g1, %fp
bleu ret_trap_user_stack_is_bolixed
mov AC_M_SFSR, %g1
- lda [%g1] ASI_M_MMUREGS, %g0
+LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0)
- lda [%g0] ASI_M_MMUREGS, %g1
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1)
or %g1, 0x2, %g1
- sta %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
restore %g0, %g0, %g0
@@ -244,13 +247,16 @@
save %g0, %g0, %g0
andn %g1, 0x2, %g1
- sta %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %g2
- lda [%g2] ASI_M_MMUREGS, %g2
+LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2)
+SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2)
mov AC_M_SFSR, %g1
- lda [%g1] ASI_M_MMUREGS, %g1
+LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1)
andcc %g1, 0x2, %g0
be ret_trap_userwins_ok
nop
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index c052313..efe3e64 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -32,6 +32,7 @@
#include <linux/cpu.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/start_kernel.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -45,6 +46,7 @@
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
+#include <asm/sections.h>
#include "kernel.h"
@@ -237,13 +239,62 @@
}
}
+struct leon_1insn_patch_entry {
+ unsigned int addr;
+ unsigned int insn;
+};
+
enum sparc_cpu sparc_cpu_model;
EXPORT_SYMBOL(sparc_cpu_model);
-struct tt_entry *sparc_ttable;
+static __init void leon_patch(void)
+{
+ struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
+ struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
+ /* Default instruction is leon - no patching */
+ if (sparc_cpu_model == sparc_leon)
+ return;
+
+ while (start < end) {
+ unsigned long addr = start->addr;
+
+ *(unsigned int *)(addr) = start->insn;
+ flushi(addr);
+
+ start++;
+ }
+}
+
+struct tt_entry *sparc_ttable;
struct pt_regs fake_swapper_regs;
+/* Called from head_32.S - before we have set up anything
+ * in the kernel. Be very careful with what you do here.
+ */
+void __init sparc32_start_kernel(struct linux_romvec *rp)
+{
+ prom_init(rp);
+
+ /* Set sparc_cpu_model */
+ sparc_cpu_model = sun_unknown;
+ if (!strcmp(&cputypval[0], "sun4m"))
+ sparc_cpu_model = sun4m;
+ if (!strcmp(&cputypval[0], "sun4s"))
+ sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
+ if (!strcmp(&cputypval[0], "sun4d"))
+ sparc_cpu_model = sun4d;
+ if (!strcmp(&cputypval[0], "sun4e"))
+ sparc_cpu_model = sun4e;
+ if (!strcmp(&cputypval[0], "sun4u"))
+ sparc_cpu_model = sun4u;
+ if (!strncmp(&cputypval[0], "leon" , 4))
+ sparc_cpu_model = sparc_leon;
+
+ leon_patch();
+ start_kernel();
+}
+
void __init setup_arch(char **cmdline_p)
{
int i;
@@ -260,21 +311,6 @@
register_console(&prom_early_console);
- /* Set sparc_cpu_model */
- sparc_cpu_model = sun_unknown;
- if (!strcmp(&cputypval[0], "sun4m"))
- sparc_cpu_model = sun4m;
- if (!strcmp(&cputypval[0], "sun4s"))
- sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
- if (!strcmp(&cputypval[0], "sun4d"))
- sparc_cpu_model = sun4d;
- if (!strcmp(&cputypval[0], "sun4e"))
- sparc_cpu_model = sun4e;
- if (!strcmp(&cputypval[0], "sun4u"))
- sparc_cpu_model = sun4u;
- if (!strncmp(&cputypval[0], "leon" , 4))
- sparc_cpu_model = sparc_leon;
-
printk("ARCH: ");
switch(sparc_cpu_model) {
case sun4m:
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 7364ddc..af27aca 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -149,8 +149,6 @@
b,a smp_do_cpu_idle
-#ifdef CONFIG_SPARC_LEON
-
__CPUINIT
.align 4
.global leon_smp_cpu_startup, smp_penguin_ctable
@@ -161,7 +159,7 @@
ld [%g1+4],%g1
srl %g1,4,%g1
set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
- sta %g1, [%g5] ASI_M_MMUREGS
+ sta %g1, [%g5] ASI_LEON_MMUREGS
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
@@ -207,5 +205,3 @@
nop
b,a smp_do_cpu_idle
-
-#endif
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index c72fdf5..3b05e66 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2054,7 +2054,7 @@
do_fpe_common(regs);
}
-extern int do_mathemu(struct pt_regs *, struct fpustate *);
+extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
void do_fpother(struct pt_regs *regs)
{
@@ -2068,7 +2068,7 @@
switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
case (3 << 14): /* unimplemented_FPop */
- ret = do_mathemu(regs, f);
+ ret = do_mathemu(regs, f, false);
break;
}
if (ret)
@@ -2308,10 +2308,12 @@
} else {
struct fpustate *f = FPUSTATE;
- /* XXX maybe verify XFSR bits like
- * XXX do_fpother() does?
+ /* On UltraSPARC T2 and later, FPU insns which
+ * are not implemented in HW signal an illegal
+ * instruction trap and do not set the FP Trap
+ * Type field in the %fsr to unimplemented_FPop.
*/
- if (do_mathemu(regs, f))
+ if (do_mathemu(regs, f, true))
return;
}
}
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0e16056..89c2c29 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -107,6 +107,11 @@
*(.sun4v_2insn_patch)
__sun4v_2insn_patch_end = .;
}
+ .leon_1insn_patch : {
+ __leon_1insn_patch = .;
+ *(.leon_1insn_patch)
+ __leon_1insn_patch_end = .;
+ }
.swapper_tsb_phys_patch : {
__swapper_tsb_phys_patch = .;
*(.swapper_tsb_phys_patch)
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 4c2de3c..28a7bc6 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -332,24 +332,30 @@
mov AC_M_SFSR, %glob_tmp
/* Clear the fault status and turn on the no_fault bit. */
- lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) ! eat SFSR
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) ! eat SFSR
- lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
- sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
/* Dump the registers and cross fingers. */
STORE_WINDOW(sp)
/* Clear the no_fault bit and check the status. */
andn %glob_tmp, 0x2, %glob_tmp
- sta %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
mov AC_M_SFSR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
andcc %glob_tmp, 0x2, %g0 ! did we fault?
be,a spwin_finish_up + 0x4 ! cool beans, success
restore %g0, %g0, %g0
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index 9fde91a..2c21cc59 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -254,16 +254,19 @@
mov AC_M_SFSR, %l4
cmp %l5, %sp
bleu fwin_user_stack_is_bolixed
- lda [%l4] ASI_M_MMUREGS, %g0 ! clear fault status
+LEON_PI( lda [%l4] ASI_LEON_MMUREGS, %g0) ! clear fault status
+SUN_PI_( lda [%l4] ASI_M_MMUREGS, %g0) ! clear fault status
/* The technique is, turn off faults on this processor,
* just let the load rip, then check the sfsr to see if
* a fault did occur. Then we turn on fault traps again
* and branch conditionally based upon what happened.
*/
- lda [%g0] ASI_M_MMUREGS, %l5 ! read mmu-ctrl reg
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg
or %l5, 0x2, %l5 ! turn on no-fault bit
- sta %l5, [%g0] ASI_M_MMUREGS ! store it
+LEON_PI(sta %l5, [%g0] ASI_LEON_MMUREGS) ! store it
+SUN_PI_(sta %l5, [%g0] ASI_M_MMUREGS) ! store it
/* Cross fingers and go for it. */
LOAD_WINDOW(sp)
@@ -275,18 +278,22 @@
/* LOCATION: Window 'T' */
- lda [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again
- andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
- sta %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
+ andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
+LEON_PI(sta %twin_tmp1, [%g0] ASI_LEON_MMUREGS) ! store it
+SUN_PI_(sta %twin_tmp1, [%g0] ASI_M_MMUREGS) ! store it
mov AC_M_SFAR, %twin_tmp2
- lda [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address
+LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %g0) ! read fault address
+SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %g0) ! read fault address
mov AC_M_SFSR, %twin_tmp2
- lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2 ! read fault status
- andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
+LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
+SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2) ! read fault status
+ andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
- bne 1f ! yep, cleanup
+ bne 1f ! yep, cleanup
nop
wr %t_psr, 0x0, %psr
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 2bbe2f2..1704068 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -163,7 +163,7 @@
u64 q[2];
} *argp;
-int do_mathemu(struct pt_regs *regs, struct fpustate *f)
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
@@ -218,7 +218,7 @@
case FSQRTS: {
unsigned long x = current_thread_info()->xfsr[0];
- x = (x >> 14) & 0xf;
+ x = (x >> 14) & 0x7;
TYPE(x,1,1,1,1,0,0);
break;
}
@@ -226,7 +226,7 @@
case FSQRTD: {
unsigned long x = current_thread_info()->xfsr[0];
- x = (x >> 14) & 0xf;
+ x = (x >> 14) & 0x7;
TYPE(x,2,1,2,1,0,0);
break;
}
@@ -357,9 +357,17 @@
if (type) {
argp rs1 = NULL, rs2 = NULL, rd = NULL;
- freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
- if (freg != (type >> 9))
- goto err;
+ /* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
+ * Type field in the %fsr to unimplemented_FPop. Nor does it
+ * use the fp_exception_other trap. Instead it signals an
+ * illegal instruction and leaves the FP trap type field of
+ * the %fsr unchanged.
+ */
+ if (!illegal_insn_trap) {
+ int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
+ if (ftt != (type >> 9))
+ goto err;
+ }
current_thread_info()->xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 69ffd31..30c3ecc 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,8 +8,9 @@
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32) += srmmu_access.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
-obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 4c67ae6..5bed085 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -32,7 +32,7 @@
}
-unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
{
unsigned int ctxtbl;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 256db6b..62e3f57 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -646,6 +646,23 @@
}
}
+/* These flush types are not available on all chips... */
+static inline unsigned long srmmu_probe(unsigned long vaddr)
+{
+ unsigned long retval;
+
+ if (sparc_cpu_model != sparc_leon) {
+
+ vaddr &= PAGE_MASK;
+ __asm__ __volatile__("lda [%1] %2, %0\n\t" :
+ "=r" (retval) :
+ "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+ } else {
+ retval = leon_swprobe(vaddr, 0);
+ }
+ return retval;
+}
+
/*
* This is much cleaner than poking around physical address space
* looking at the prom's page table directly which is what most
@@ -665,7 +682,7 @@
break; /* probably wrap around */
if(start == 0xfef00000)
start = KADB_DEBUGGER_BEGVM;
- if(!(prompte = srmmu_hwprobe(start))) {
+ if(!(prompte = srmmu_probe(start))) {
start += PAGE_SIZE;
continue;
}
@@ -674,12 +691,12 @@
what = 0;
if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
- if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+ if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
what = 1;
}
if(!(start & ~(SRMMU_PGDIR_MASK))) {
- if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+ if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
prompte)
what = 2;
}
@@ -1156,7 +1173,7 @@
#ifdef TURBOSPARC_WRITEBACK
volatile unsigned long clear;
- if (srmmu_hwprobe(page))
+ if (srmmu_probe(page))
turbosparc_flush_page_cache(page);
clear = srmmu_get_fstatus();
#endif
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644
index 0000000..d0a67b2
--- /dev/null
+++ b/arch/sparc/mm/srmmu_access.S
@@ -0,0 +1,82 @@
+/* Assembler variants of srmmu access functions.
+ * Implemented in assembler to allow run-time patching.
+ * LEON uses a different ASI for MMUREGS than SUN.
+ *
+ * The leon_1insn_patch infrastructure is used
+ * for the run-time patching.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asi.h>
+
+/* unsigned int srmmu_get_mmureg(void) */
+ENTRY(srmmu_get_mmureg)
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_mmureg)
+
+/* void srmmu_set_mmureg(unsigned long regval) */
+ENTRY(srmmu_set_mmureg)
+LEON_PI(sta %o0, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %o0, [%g0] ASI_M_MMUREGS)
+ retl
+ nop
+ENDPROC(srmmu_set_mmureg)
+
+/* void srmmu_set_ctable_ptr(unsigned long paddr) */
+ENTRY(srmmu_set_ctable_ptr)
+ /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
+ srl %o0, 4, %g1
+ and %g1, SRMMU_CTX_PMASK, %g1
+
+ mov SRMMU_CTXTBL_PTR, %g2
+LEON_PI(sta %g1, [%g2] ASI_LEON_MMUREGS)
+SUN_PI_(sta %g1, [%g2] ASI_M_MMUREGS)
+ retl
+ nop
+ENDPROC(srmmu_set_ctable_ptr)
+
+
+/* void srmmu_set_context(int context) */
+ENTRY(srmmu_set_context)
+ mov SRMMU_CTX_REG, %g1
+LEON_PI(sta %o0, [%g1] ASI_LEON_MMUREGS)
+SUN_PI_(sta %o0, [%g1] ASI_M_MMUREGS)
+ retl
+ nop
+ENDPROC(srmmu_set_context)
+
+
+/* int srmmu_get_context(void) */
+ENTRY(srmmu_get_context)
+ mov SRMMU_CTX_REG, %o0
+LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_context)
+
+
+/* unsigned int srmmu_get_fstatus(void) */
+ENTRY(srmmu_get_fstatus)
+ mov SRMMU_FAULT_STATUS, %o0
+LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_fstatus)
+
+
+/* unsigned int srmmu_get_faddr(void) */
+ENTRY(srmmu_get_faddr)
+ mov SRMMU_FAULT_ADDR, %o0
+LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_faddr)
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76..421bef9 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,8 +23,6 @@
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
- # If BLK_CGROUP is a module, CFQ has to be built as module.
- depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@
This is the default I/O scheduler.
- Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 126c341..02cf633 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -11,1570 +11,612 @@
* Nauman Rafique <nauman@google.com>
*/
#include <linux/ioprio.h>
-#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
-#include "blk-cgroup.h"
#include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "blk-cgroup.h"
+#include "blk.h"
#define MAX_KEY_LEN 100
-static DEFINE_SPINLOCK(blkio_list_lock);
-static LIST_HEAD(blkio_list);
+static DEFINE_MUTEX(blkcg_pol_mutex);
-struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkcg_root);
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
-static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
-{
- list_add(&pn->node, &blkcg->policy_list);
-}
-
-static inline bool cftype_blkg_same_policy(struct cftype *cft,
- struct blkio_group *blkg)
-{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-
- if (blkg->plid == plid)
- return 1;
-
- return 0;
-}
-
-/* Determines if policy node matches cgroup file being accessed */
-static inline bool pn_matches_cftype(struct cftype *cft,
- struct blkio_policy_node *pn)
-{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
-
- return (plid == pn->plid && fileid == pn->fileid);
-}
-
-/* Must be called with blkcg->lock held */
-static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
-{
- list_del(&pn->node);
-}
-
-/* Must be called with blkcg->lock held */
-static struct blkio_policy_node *
-blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
- enum blkio_policy_id plid, int fileid)
-{
- struct blkio_policy_node *pn;
-
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
- return pn;
- }
-
- return NULL;
-}
-
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkio_cgroup, css);
+ struct blkcg, css);
}
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+static struct blkcg *task_blkcg(struct task_struct *tsk)
{
return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
-
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
-{
- struct blkio_policy_type *blkiop;
-
- list_for_each_entry(blkiop, &blkio_list, list) {
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
- if (blkiop->ops.blkio_update_group_weight_fn)
- blkiop->ops.blkio_update_group_weight_fn(blkg->key,
- blkg, weight);
- }
+ struct blkcg, css);
}
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
- int fileid)
+struct blkcg *bio_blkcg(struct bio *bio)
{
- struct blkio_policy_type *blkiop;
+ if (bio && bio->bi_css)
+ return container_of(bio->bi_css, struct blkcg, css);
+ return task_blkcg(current);
+}
+EXPORT_SYMBOL_GPL(bio_blkcg);
- list_for_each_entry(blkiop, &blkio_list, list) {
+static bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkcg_policy *pol)
+{
+ return pol && test_bit(pol->plid, q->blkcg_pols);
+}
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+ int i;
+
+ if (!blkg)
+ return;
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd = blkg->pd[i];
+
+ if (!pd)
continue;
- if (fileid == BLKIO_THROTL_read_bps_device
- && blkiop->ops.blkio_update_group_read_bps_fn)
- blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
- blkg, bps);
+ if (pol && pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
- if (fileid == BLKIO_THROTL_write_bps_device
- && blkiop->ops.blkio_update_group_write_bps_fn)
- blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
- blkg, bps);
+ kfree(pd);
}
+
+ kfree(blkg);
}
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
- unsigned int iops, int fileid)
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ *
+ * Allocate a new blkg associating @blkcg and @q.
+ */
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
- struct blkio_policy_type *blkiop;
+ struct blkcg_gq *blkg;
+ int i;
- list_for_each_entry(blkiop, &blkio_list, list) {
+ /* alloc and init base part */
+ blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+ if (!blkg)
+ return NULL;
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
+ blkg->q = q;
+ INIT_LIST_HEAD(&blkg->q_node);
+ blkg->blkcg = blkcg;
+ blkg->refcnt = 1;
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd;
+
+ if (!blkcg_policy_enabled(q, pol))
continue;
- if (fileid == BLKIO_THROTL_read_iops_device
- && blkiop->ops.blkio_update_group_read_iops_fn)
- blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
- blkg, iops);
-
- if (fileid == BLKIO_THROTL_write_iops_device
- && blkiop->ops.blkio_update_group_write_iops_fn)
- blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
- blkg,iops);
- }
-}
-
-/*
- * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
- bool sync)
-{
- if (direction)
- stat[BLKIO_STAT_WRITE] += add;
- else
- stat[BLKIO_STAT_READ] += add;
- if (sync)
- stat[BLKIO_STAT_SYNC] += add;
- else
- stat[BLKIO_STAT_ASYNC] += add;
-}
-
-/*
- * Decrements the appropriate stat variable if non-zero depending on the
- * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
-{
- if (direction) {
- BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
- stat[BLKIO_STAT_WRITE]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_READ] == 0);
- stat[BLKIO_STAT_READ]--;
- }
- if (sync) {
- BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
- stat[BLKIO_STAT_SYNC]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
- stat[BLKIO_STAT_ASYNC]--;
- }
-}
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg)
-{
- if (blkio_blkg_waiting(&blkg->stats))
- return;
- if (blkg == curr_blkg)
- return;
- blkg->stats.start_group_wait_time = sched_clock();
- blkio_mark_blkg_waiting(&blkg->stats);
-}
-
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
-{
- unsigned long long now;
-
- if (!blkio_blkg_waiting(stats))
- return;
-
- now = sched_clock();
- if (time_after64(now, stats->start_group_wait_time))
- stats->group_wait_time += now - stats->start_group_wait_time;
- blkio_clear_blkg_waiting(stats);
-}
-
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
-{
- unsigned long long now;
-
- if (!blkio_blkg_empty(stats))
- return;
-
- now = sched_clock();
- if (time_after64(now, stats->start_empty_time))
- stats->empty_time += now - stats->start_empty_time;
- blkio_clear_blkg_empty(stats);
-}
-
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- BUG_ON(blkio_blkg_idling(&blkg->stats));
- blkg->stats.start_idle_time = sched_clock();
- blkio_mark_blkg_idling(&blkg->stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
-
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- unsigned long long now;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (blkio_blkg_idling(stats)) {
- now = sched_clock();
- if (time_after64(now, stats->start_idle_time))
- stats->idle_time += now - stats->start_idle_time;
- blkio_clear_blkg_idling(stats);
- }
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- stats->avg_queue_size_sum +=
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
- stats->avg_queue_size_samples++;
- blkio_update_group_wait_time(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
-
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
-
- if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
- }
-
- /*
- * group is already marked empty. This can happen if cfqq got new
- * request in parent group and moved to this group while being added
- * to service tree. Just ignore the event and move on.
- */
- if(blkio_blkg_empty(stats)) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
- }
-
- stats->start_empty_time = sched_clock();
- blkio_mark_blkg_empty(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkg->stats.dequeue += dequeue;
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction,
- bool sync)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
- sync);
- blkio_end_empty_time(&blkg->stats);
- blkio_set_start_group_wait_time(blkg, curr_blkg);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
-
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
- direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
-
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
- unsigned long unaccounted_time)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkg->stats.time += time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg->stats.unaccounted_time += unaccounted_time;
-#endif
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
-
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
-
- /*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
- */
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- stats_cpu->sectors += bytes >> 9;
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
- 1, direction, sync);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
- bytes, direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- struct blkio_group_stats *stats;
- unsigned long flags;
- unsigned long long now = sched_clock();
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (time_after64(now, io_start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
- now - io_start_time, direction, sync);
- if (time_after64(io_start_time, start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
- io_start_time - start_time, direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
-
- /*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
- */
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
- direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-
-/*
- * This function allocates the per cpu stats for blkio_group. Should be called
- * from sleepable context as alloc_per_cpu() requires that.
- */
-int blkio_alloc_blkg_stats(struct blkio_group *blkg)
-{
- /* Allocate memory for per cpu stats */
- blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
- if (!blkg->stats_cpu)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
-
-void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- spin_lock_init(&blkg->stats_lock);
- rcu_assign_pointer(blkg->key, key);
- blkg->blkcg_id = css_id(&blkcg->css);
- hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
- blkg->plid = plid;
- spin_unlock_irqrestore(&blkcg->lock, flags);
- /* Need to take css reference ? */
- cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
- blkg->dev = dev;
-}
-EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
-
-static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- hlist_del_init_rcu(&blkg->blkcg_node);
- blkg->blkcg_id = 0;
-}
-
-/*
- * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
- * indicating that blk_group was unhashed by the time we got to it.
- */
-int blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- struct blkio_cgroup *blkcg;
- unsigned long flags;
- struct cgroup_subsys_state *css;
- int ret = 1;
-
- rcu_read_lock();
- css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (css) {
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
+ /* alloc per-policy data and attach it to blkg */
+ pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+ if (!pd) {
+ blkg_free(blkg);
+ return NULL;
}
- spin_unlock_irqrestore(&blkcg->lock, flags);
+
+ blkg->pd[i] = pd;
+ pd->blkg = blkg;
}
- rcu_read_unlock();
- return ret;
+ /* invoke per-policy init */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkcg_policy_enabled(blkg->q, pol))
+ pol->pd_init_fn(blkg);
+ }
+
+ return blkg;
}
-EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
-/* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- void *__key;
+ struct blkcg_gq *blkg;
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- __key = blkg->key;
- if (__key == key)
- return blkg;
- }
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
+
+ /*
+ * Hint didn't match. Look up from the radix tree. Note that we
+ * may not be holding queue_lock and thus are not sure whether
+ * @blkg from blkg_tree has already been removed or not, so we
+ * can't update hint to the lookup result. Leave it to the caller.
+ */
+ blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+ if (blkg && blkg->q == q)
+ return blkg;
return NULL;
}
-EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- int i, j, k;
- /*
- * Note: On 64 bit arch this should not be an issue. This has the
- * possibility of returning some inconsistent value on 32bit arch
- * as 64bit update on 32bit is non atomic. Taking care of this
- * corner case makes code very complicated, like sending IPIs to
- * cpus, taking care of stats of offline cpus etc.
- *
- * reset stats is anyway more of a debug feature and this sounds a
- * corner case. So I am not complicating the code yet until and
- * unless this becomes a real issue.
- */
- for_each_possible_cpu(i) {
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
- stats_cpu->sectors = 0;
- for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
- for (k = 0; k < BLKIO_STAT_TOTAL; k++)
- stats_cpu->stat_arr_cpu[j][k] = 0;
- }
-}
-
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
-{
- struct blkio_cgroup *blkcg;
- struct blkio_group *blkg;
- struct blkio_group_stats *stats;
- struct hlist_node *n;
- uint64_t queued[BLKIO_STAT_TOTAL];
- int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- bool idling, waiting, empty;
- unsigned long long now = sched_clock();
-#endif
-
- blkcg = cgroup_to_blkio_cgroup(cgroup);
- spin_lock_irq(&blkcg->lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- spin_lock(&blkg->stats_lock);
- stats = &blkg->stats;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- idling = blkio_blkg_idling(stats);
- waiting = blkio_blkg_waiting(stats);
- empty = blkio_blkg_empty(stats);
-#endif
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
- memset(stats, 0, sizeof(struct blkio_group_stats));
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (idling) {
- blkio_mark_blkg_idling(stats);
- stats->start_idle_time = now;
- }
- if (waiting) {
- blkio_mark_blkg_waiting(stats);
- stats->start_group_wait_time = now;
- }
- if (empty) {
- blkio_mark_blkg_empty(stats);
- stats->start_empty_time = now;
- }
-#endif
- spin_unlock(&blkg->stats_lock);
-
- /* Reset Per cpu stats which don't take blkg->stats_lock */
- blkio_reset_stats_cpu(blkg);
- }
-
- spin_unlock_irq(&blkcg->lock);
- return 0;
-}
-
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
- int chars_left, bool diskname_only)
-{
- snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
- chars_left -= strlen(str);
- if (chars_left <= 0) {
- printk(KERN_WARNING
- "Possibly incorrect cgroup stat display format");
- return;
- }
- if (diskname_only)
- return;
- switch (type) {
- case BLKIO_STAT_READ:
- strlcat(str, " Read", chars_left);
- break;
- case BLKIO_STAT_WRITE:
- strlcat(str, " Write", chars_left);
- break;
- case BLKIO_STAT_SYNC:
- strlcat(str, " Sync", chars_left);
- break;
- case BLKIO_STAT_ASYNC:
- strlcat(str, " Async", chars_left);
- break;
- case BLKIO_STAT_TOTAL:
- strlcat(str, " Total", chars_left);
- break;
- default:
- strlcat(str, " Invalid", chars_left);
- }
-}
-
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
- struct cgroup_map_cb *cb, dev_t dev)
-{
- blkio_get_key_name(0, dev, str, chars_left, true);
- cb->fill(cb, str, val);
- return val;
-}
-
-
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
- enum stat_type_cpu type, enum stat_sub_type sub_type)
-{
- int cpu;
- struct blkio_group_stats_cpu *stats_cpu;
- u64 val = 0, tval;
-
- for_each_possible_cpu(cpu) {
- unsigned int start;
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
-
- do {
- start = u64_stats_fetch_begin(&stats_cpu->syncp);
- if (type == BLKIO_STAT_CPU_SECTORS)
- tval = stats_cpu->sectors;
- else
- tval = stats_cpu->stat_arr_cpu[type][sub_type];
- } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
-
- val += tval;
- }
-
- return val;
-}
-
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
-{
- uint64_t disk_total, val;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
-
- if (type == BLKIO_STAT_CPU_SECTORS) {
- val = blkio_read_stat_cpu(blkg, type, 0);
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
- }
-
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- val = blkio_read_stat_cpu(blkg, type, sub_type);
- cb->fill(cb, key_str, val);
- }
-
- disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
- blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
-
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
-}
-
-/* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
-{
- uint64_t disk_total;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
-
- if (type == BLKIO_STAT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.time, cb, dev);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (type == BLKIO_STAT_UNACCOUNTED_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.unaccounted_time, cb, dev);
- if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
- uint64_t sum = blkg->stats.avg_queue_size_sum;
- uint64_t samples = blkg->stats.avg_queue_size_samples;
- if (samples)
- do_div(sum, samples);
- else
- sum = 0;
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
- }
- if (type == BLKIO_STAT_GROUP_WAIT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.group_wait_time, cb, dev);
- if (type == BLKIO_STAT_IDLE_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.idle_time, cb, dev);
- if (type == BLKIO_STAT_EMPTY_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.empty_time, cb, dev);
- if (type == BLKIO_STAT_DEQUEUE)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.dequeue, cb, dev);
-#endif
-
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
- }
- disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
- blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
-}
-
-static int blkio_policy_parse_and_set(char *buf,
- struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
-{
- struct gendisk *disk = NULL;
- char *s[4], *p, *major_s = NULL, *minor_s = NULL;
- unsigned long major, minor;
- int i = 0, ret = -EINVAL;
- int part;
- dev_t dev;
- u64 temp;
-
- memset(s, 0, sizeof(s));
-
- while ((p = strsep(&buf, " ")) != NULL) {
- if (!*p)
- continue;
-
- s[i++] = p;
-
- /* Prevent from inputing too many things */
- if (i == 3)
- break;
- }
-
- if (i != 2)
- goto out;
-
- p = strsep(&s[0], ":");
- if (p != NULL)
- major_s = p;
- else
- goto out;
-
- minor_s = s[0];
- if (!minor_s)
- goto out;
-
- if (strict_strtoul(major_s, 10, &major))
- goto out;
-
- if (strict_strtoul(minor_s, 10, &minor))
- goto out;
-
- dev = MKDEV(major, minor);
-
- if (strict_strtoull(s[1], 10, &temp))
- goto out;
-
- /* For rule removal, do not check for device presence. */
- if (temp) {
- disk = get_gendisk(dev, &part);
- if (!disk || part) {
- ret = -ENODEV;
- goto out;
- }
- }
-
- newpn->dev = dev;
-
- switch (plid) {
- case BLKIO_POLICY_PROP:
- if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
- temp > BLKIO_WEIGHT_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.weight = temp;
- break;
- case BLKIO_POLICY_THROTL:
- switch(fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.bps = temp;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (temp > THROTL_IOPS_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.iops = (unsigned int)temp;
- break;
- }
- break;
- default:
- BUG();
- }
- ret = 0;
-out:
- put_disk(disk);
- return ret;
-}
-
-unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int weight;
-
- spin_lock_irqsave(&blkcg->lock, flags);
-
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device);
- if (pn)
- weight = pn->val.weight;
- else
- weight = blkcg->weight;
-
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return weight;
-}
-EXPORT_SYMBOL_GPL(blkcg_get_weight);
-
-uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
-}
-
-uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
-}
-
-unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
-
-unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
-
-/* Checks whether user asked for deleting a policy rule */
-static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
-{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->val.weight == 0)
- return 1;
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- if (pn->val.bps == 0)
- return 1;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (pn->val.iops == 0)
- return 1;
- }
- break;
- default:
- BUG();
- }
-
- return 0;
-}
-
-static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
- struct blkio_policy_node *newpn)
-{
- switch(oldpn->plid) {
- case BLKIO_POLICY_PROP:
- oldpn->val.weight = newpn->val.weight;
- break;
- case BLKIO_POLICY_THROTL:
- switch(newpn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- oldpn->val.bps = newpn->val.bps;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- oldpn->val.iops = newpn->val.iops;
- }
- break;
- default:
- BUG();
- }
-}
-
-/*
- * Some rules/values in blkg have changed. Propagate those to respective
- * policies.
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
*/
-static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, struct blkio_policy_node *pn)
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
- unsigned int weight, iops;
- u64 bps;
+ WARN_ON_ONCE(!rcu_read_lock_held());
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- weight = pn->val.weight ? pn->val.weight :
- blkcg->weight;
- blkio_update_group_weight(blkg, weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- bps = pn->val.bps ? pn->val.bps : (-1);
- blkio_update_group_bps(blkg, bps, pn->fileid);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- iops = pn->val.iops ? pn->val.iops : (-1);
- blkio_update_group_iops(blkg, iops, pn->fileid);
- break;
- }
- break;
- default:
- BUG();
- }
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q);
}
+EXPORT_SYMBOL_GPL(blkg_lookup);
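
To make the locking contract above concrete, a minimal caller would look roughly like the sketch below. example_blkg_is_present() is a hypothetical helper, not part of this patch; it only assumes blkg_lookup() plus the RCU rules documented in the comment above (kernel context, <linux/blkdev.h> and "blk-cgroup.h" assumed).

	static bool example_blkg_is_present(struct blkcg *blkcg,
					    struct request_queue *q)
	{
		struct blkcg_gq *blkg;
		bool found;

		/* blkg_lookup() may only be called under the RCU read lock */
		rcu_read_lock();
		blkg = blkg_lookup(blkcg, q);	/* NULL while @q is bypassing */
		found = blkg != NULL;
		/*
		 * @blkg must not be used after the RCU read lock is dropped
		 * unless an extra reference or q->queue_lock pins it.
		 */
		rcu_read_unlock();

		return found;
	}
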
-/*
- * A policy node rule has been updated. Propagate this update to all the
- * block groups which might be affected by this update.
- */
-static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
+ struct blkcg_gq *blkg;
+ int ret;
- spin_lock(&blkio_list_lock);
- spin_lock_irq(&blkcg->lock);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ lockdep_assert_held(q->queue_lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (pn->dev != blkg->dev || pn->plid != blkg->plid)
- continue;
- blkio_update_blkg_policy(blkcg, blkg, pn);
+ /* lookup and update hint on success, see __blkg_lookup() for details */
+ blkg = __blkg_lookup(blkcg, q);
+ if (blkg) {
+ rcu_assign_pointer(blkcg->blkg_hint, blkg);
+ return blkg;
}
- spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
-}
+ /* blkg holds a reference to blkcg */
+ if (!css_tryget(&blkcg->css))
+ return ERR_PTR(-EINVAL);
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
-{
- int ret = 0;
- char *buf;
- struct blkio_policy_node *newpn, *pn;
- struct blkio_cgroup *blkcg;
- int keep_newpn = 0;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
+ /* allocate */
+ ret = -ENOMEM;
+ blkg = blkg_alloc(blkcg, q);
+ if (unlikely(!blkg))
+ goto err_put;
- buf = kstrdup(buffer, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
- if (!newpn) {
- ret = -ENOMEM;
- goto free_buf;
- }
-
- ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
+ /* insert */
+ ret = radix_tree_preload(GFP_ATOMIC);
if (ret)
- goto free_newpn;
+ goto err_free;
- blkcg = cgroup_to_blkio_cgroup(cgrp);
+ spin_lock(&blkcg->lock);
+ ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+ if (likely(!ret)) {
+ hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+ list_add(&blkg->q_node, &q->blkg_list);
+ }
+ spin_unlock(&blkcg->lock);
+ radix_tree_preload_end();
+
+ if (!ret)
+ return blkg;
+err_free:
+ blkg_free(blkg);
+err_put:
+ css_put(&blkcg->css);
+ return ERR_PTR(ret);
+}
+
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ /*
+	 * This could be the first entry point of the blkcg implementation and
+ * we shouldn't allow anything to go through for a bypassing queue.
+ */
+ if (unlikely(blk_queue_bypass(q)))
+ return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return __blkg_lookup_create(blkcg, q);
+}
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
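
For illustration, a policy that needs a group on the IO path would combine the RCU and queue-locking rules roughly as in the hedged sketch below. example_get_blkg() is hypothetical and simply falls back to "no group" when creation fails, for instance while the queue is bypassing.

	/* caller already holds q->queue_lock, e.g. from the request path */
	static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
						 struct request_queue *q)
	{
		struct blkcg_gq *blkg;

		lockdep_assert_held(q->queue_lock);

		rcu_read_lock();
		blkg = blkg_lookup_create(blkcg, q);
		rcu_read_unlock();

		/* holding q->queue_lock keeps a returned blkg from being destroyed */
		return IS_ERR(blkg) ? NULL : blkg;
	}
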
+
+static void blkg_destroy(struct blkcg_gq *blkg)
+{
+ struct request_queue *q = blkg->q;
+ struct blkcg *blkcg = blkg->blkcg;
+
+ lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&blkcg->lock);
+
+ /* Something wrong if we are trying to remove same group twice */
+ WARN_ON_ONCE(list_empty(&blkg->q_node));
+ WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+
+ radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+ list_del_init(&blkg->q_node);
+ hlist_del_init_rcu(&blkg->blkcg_node);
+
+ /*
+ * Both setting lookup hint to and clearing it from @blkg are done
+ * under queue_lock. If it's not pointing to @blkg now, it never
+ * will. Hint assignment itself can race safely.
+ */
+ if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ rcu_assign_pointer(blkcg->blkg_hint, NULL);
+
+ /*
+ * Put the reference taken at the time of creation so that when all
+	 * queues are gone, the group can be destroyed.
+ */
+ blkg_put(blkg);
+}
+
+/**
+ * blkg_destroy_all - destroy all blkgs associated with a request_queue
+ * @q: request_queue of interest
+ *
+ * Destroy all blkgs associated with @q.
+ */
+static void blkg_destroy_all(struct request_queue *q)
+{
+ struct blkcg_gq *blkg, *n;
+
+ lockdep_assert_held(q->queue_lock);
+
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
+ blkg_destroy(blkg);
+ spin_unlock(&blkcg->lock);
+ }
+}
+
+static void blkg_rcu_free(struct rcu_head *rcu_head)
+{
+ blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
+}
+
+void __blkg_release(struct blkcg_gq *blkg)
+{
+ /* release the extra blkcg reference this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+
+ /*
+	 * A group is freed in an RCU manner. But holding an RCU read lock
+	 * does not mean that one can access all the fields of blkg and
+	 * assume they are valid. For example, don't try to follow
+	 * throtl_data and request queue links.
+	 *
+	 * Having a reference to blkg under RCU allows access only to values
+	 * local to groups, like group stats and group rate limits.
+ */
+ call_rcu(&blkg->rcu_head, blkg_rcu_free);
+}
+EXPORT_SYMBOL_GPL(__blkg_release);
+
+static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
+ u64 val)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg_gq *blkg;
+ struct hlist_node *n;
+ int i;
+
+ mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
- pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
- if (!pn) {
- if (!blkio_delete_rule_command(newpn)) {
- blkio_policy_insert_node(blkcg, newpn);
- keep_newpn = 1;
+ /*
+ * Note that stat reset is racy - it doesn't synchronize against
+ * stat updates. This is a debug feature which shouldn't exist
+ * anyway. If you get hit by a race, retry.
+ */
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkcg_policy_enabled(blkg->q, pol) &&
+ pol->pd_reset_stats_fn)
+ pol->pd_reset_stats_fn(blkg);
}
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
}
- if (blkio_delete_rule_command(newpn)) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
- }
+ spin_unlock_irq(&blkcg->lock);
+ mutex_unlock(&blkcg_pol_mutex);
+ return 0;
+}
+
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
+{
+ /* some drivers (floppy) instantiate a queue w/o disk registered */
+ if (blkg->q->backing_dev_info.dev)
+ return dev_name(blkg->q->backing_dev_info.dev);
+ return NULL;
+}
+
+/**
+ * blkcg_print_blkgs - helper for printing per-blkg data
+ * @sf: seq_file to print to
+ * @blkcg: blkcg of interest
+ * @prfill: fill function to print out a blkg
+ * @pol: policy in question
+ * @data: data to be passed to @prfill
+ * @show_total: to print out sum of prfill return values or not
+ *
+ * This function invokes @prfill on each blkg of @blkcg if pd for the
+ * policy specified by @pol exists. @prfill is invoked with @sf, the
+ * policy data and @data. If @show_total is %true, the sum of the return
+ * values from @prfill is printed with a "Total" label at the end.
+ *
+ * This is to be used to construct print functions for
+ * cftype->read_seq_string method.
+ */
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total)
+{
+ struct blkcg_gq *blkg;
+ struct hlist_node *n;
+ u64 total = 0;
+
+ spin_lock_irq(&blkcg->lock);
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+ if (blkcg_policy_enabled(blkg->q, pol))
+ total += prfill(sf, blkg->pd[pol->plid], data);
spin_unlock_irq(&blkcg->lock);
- blkio_update_policy_rule(pn, newpn);
-
-update_io_group:
- blkio_update_policy_node_blkg(blkcg, newpn);
-
-free_newpn:
- if (!keep_newpn)
- kfree(newpn);
-free_buf:
- kfree(buf);
- return ret;
-}
-
-static void
-blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
-{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->fileid == BLKIO_PROP_weight_device)
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.bps);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.iops);
- break;
- }
- break;
- default:
- BUG();
- }
-}
-
-/* cgroup files which read their data from policy nodes end up here */
-static void blkio_read_policy_node_files(struct cftype *cft,
- struct blkio_cgroup *blkcg, struct seq_file *m)
-{
- struct blkio_policy_node *pn;
-
- if (!list_empty(&blkcg->policy_list)) {
- spin_lock_irq(&blkcg->lock);
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (!pn_matches_cftype(cft, pn))
- continue;
- blkio_print_policy_node(m, pn);
- }
- spin_unlock_irq(&blkcg->lock);
- }
-}
-
-static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *m)
-{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
-
- return 0;
-}
-
-static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
- struct cftype *cft, struct cgroup_map_cb *cb,
- enum stat_type type, bool show_total, bool pcpu)
-{
- struct blkio_group *blkg;
- struct hlist_node *n;
- uint64_t cgroup_total = 0;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (blkg->dev) {
- if (!cftype_blkg_same_policy(cft, blkg))
- continue;
- if (pcpu)
- cgroup_total += blkio_get_stat_cpu(blkg, cb,
- blkg->dev, type);
- else {
- spin_lock_irq(&blkg->stats_lock);
- cgroup_total += blkio_get_stat(blkg, cb,
- blkg->dev, type);
- spin_unlock_irq(&blkg->stats_lock);
- }
- }
- }
if (show_total)
- cb->fill(cb, "Total", cgroup_total);
- rcu_read_unlock();
- return 0;
+ seq_printf(sf, "Total %llu\n", (unsigned long long)total);
+}
+EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
+
+/**
+ * __blkg_prfill_u64 - prfill helper for a single u64 value
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @v: value to print
+ *
+ * Print @v to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
+{
+ const char *dname = blkg_dev_name(pd->blkg);
+
+ if (!dname)
+ return 0;
+
+ seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
+ return v;
+}
+EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
+
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat)
+{
+ static const char *rwstr[] = {
+ [BLKG_RWSTAT_READ] = "Read",
+ [BLKG_RWSTAT_WRITE] = "Write",
+ [BLKG_RWSTAT_SYNC] = "Sync",
+ [BLKG_RWSTAT_ASYNC] = "Async",
+ };
+ const char *dname = blkg_dev_name(pd->blkg);
+ u64 v;
+ int i;
+
+ if (!dname)
+ return 0;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+ (unsigned long long)rwstat->cnt[i]);
+
+ v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+ seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+ return v;
}
-/* All map kind of cgroup file get serviced by this function */
-static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+/**
+ * blkg_prfill_stat - prfill callback for blkg_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * prfill callback for printing a blkg_stat.
+ */
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_TIME, 0, 0);
- case BLKIO_PROP_sectors:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SECTORS, 0, 1);
- case BLKIO_PROP_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_PROP_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- case BLKIO_PROP_io_service_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_SERVICE_TIME, 1, 0);
- case BLKIO_PROP_io_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_WAIT_TIME, 1, 0);
- case BLKIO_PROP_io_merged:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_MERGED, 1, 1);
- case BLKIO_PROP_io_queued:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_QUEUED, 1, 0);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- case BLKIO_PROP_unaccounted_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
- case BLKIO_PROP_dequeue:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_DEQUEUE, 0, 0);
- case BLKIO_PROP_avg_queue_size:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
- case BLKIO_PROP_group_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
- case BLKIO_PROP_idle_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_IDLE_TIME, 0, 0);
- case BLKIO_PROP_empty_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_EMPTY_TIME, 0, 0);
-#endif
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_THROTL_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
-
- return 0;
+ return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
+EXPORT_SYMBOL_GPL(blkg_prfill_stat);
-static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- struct blkio_policy_node *pn;
+ struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
- if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
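
As a hedged sketch of how the printing helpers above are meant to be combined, a policy could implement a cftype read_seq_string method as below. struct example_pd, example_policy, example_pd_init() and the serviced/limit fields are hypothetical placeholders for a real policy's private data, not part of this patch; plid is assumed to be assigned when the policy is registered (not shown here).

	struct example_pd {
		struct blkg_policy_data pd;	/* must come first so offsets match */
		struct blkg_rwstat serviced;	/* hypothetical per-group stat */
		u64 limit;			/* hypothetical per-group config value */
	};

	static void example_pd_init(struct blkcg_gq *blkg)
	{
		/* initialize the example_pd embedded in blkg->pd[] here */
	}

	static struct blkcg_policy example_policy = {
		.pd_size	= sizeof(struct example_pd),
		.pd_init_fn	= example_pd_init,
	};

	static int example_print_serviced(struct cgroup *cgrp, struct cftype *cft,
					  struct seq_file *sf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

		/* one line per device and direction, plus a trailing "Total" line */
		blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &example_policy,
				  offsetof(struct example_pd, serviced), true);
		return 0;
	}
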
+
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value. This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx)
+ __acquires(rcu) __acquires(disk->queue->queue_lock)
+{
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ unsigned int major, minor;
+ unsigned long long v;
+ int part, ret;
+
+ if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
return -EINVAL;
- spin_lock(&blkio_list_lock);
- spin_lock_irq(&blkcg->lock);
- blkcg->weight = (unsigned int)val;
+ disk = get_gendisk(MKDEV(major, minor), &part);
+ if (!disk || part)
+ return -EINVAL;
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- pn = blkio_policy_search_node(blkcg, blkg->dev,
- BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
- if (pn)
- continue;
+ rcu_read_lock();
+ spin_lock_irq(disk->queue->queue_lock);
- blkio_update_group_weight(blkg, blkcg->weight);
- }
- spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
- return 0;
-}
+ if (blkcg_policy_enabled(disk->queue, pol))
+ blkg = blkg_lookup_create(blkcg, disk->queue);
+ else
+ blkg = ERR_PTR(-EINVAL);
-static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return (u64)blkcg->weight;
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ rcu_read_unlock();
+ spin_unlock_irq(disk->queue->queue_lock);
+ put_disk(disk);
+ /*
+ * If queue was bypassing, we should retry. Do so after a
+ * short msleep(). It isn't strictly necessary but queue
+ * can be bypassing for some time and it's always nice to
+ * avoid busy looping.
+ */
+ if (ret == -EBUSY) {
+ msleep(10);
+ ret = restart_syscall();
}
- break;
- default:
- BUG();
+ return ret;
}
+
+ ctx->disk = disk;
+ ctx->blkg = blkg;
+ ctx->v = v;
return 0;
}
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
-static int
-blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update. This function must be paired
+ * with blkg_conf_prep().
+ */
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+ __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return blkio_weight_write(blkcg, val);
- }
- break;
- default:
- BUG();
- }
-
- return 0;
+ spin_unlock_irq(ctx->disk->queue->queue_lock);
+ rcu_read_unlock();
+ put_disk(ctx->disk);
}
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
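
A hedged sketch of the intended prep/finish pairing in a policy's write_string handler follows; example_set_limit() is hypothetical and reuses struct example_pd and example_policy from the earlier sketch, with the limit field standing in for a real per-device configuration knob.

	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
				     const char *buf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
		struct blkg_conf_ctx ctx;
		struct example_pd *epd;
		int ret;

		/* parses "MAJ:MIN VALUE" and returns with RCU + queue_lock held */
		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
		if (ret)
			return ret;

		epd = container_of(ctx.blkg->pd[example_policy.plid],
				   struct example_pd, pd);
		epd->limit = ctx.v;		/* hypothetical per-group limit */

		blkg_conf_finish(&ctx);		/* drops the locks taken by _prep() */
		return 0;
	}
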
-struct cftype blkio_files[] = {
- {
- .name = "weight_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "weight",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight),
- .read_u64 = blkiocg_file_read_u64,
- .write_u64 = blkiocg_file_write_u64,
- },
- {
- .name = "time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "sectors",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_sectors),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_merged",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_merged),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_queued",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_queued),
- .read_map = blkiocg_file_read_map,
- },
+struct cftype blkcg_files[] = {
{
.name = "reset_stats",
- .write_u64 = blkiocg_reset_stats,
+ .write_u64 = blkcg_reset_stats,
},
-#ifdef CONFIG_BLK_DEV_THROTTLING
- {
- .name = "throttle.read_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
-
- {
- .name = "throttle.write_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
-
- {
- .name = "throttle.read_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
-
- {
- .name = "throttle.write_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "throttle.io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "throttle.io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
- .name = "avg_queue_size",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_avg_queue_size),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "group_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_group_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "idle_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_idle_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "empty_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_empty_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "dequeue",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_dequeue),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "unaccounted_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_unaccounted_time),
- .read_map = blkiocg_file_read_map,
- },
-#endif
{ } /* terminate */
};
-static void blkiocg_destroy(struct cgroup *cgroup)
+/**
+ * blkcg_pre_destroy - cgroup pre_destroy callback
+ * @cgroup: cgroup of interest
+ *
+ * This function is called when @cgroup is about to go away and is responsible
+ * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * removed while holding both q and blkcg locks. As blkcg lock is nested
+ * inside q lock, this function performs reverse double lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
+ */
+static int blkcg_pre_destroy(struct cgroup *cgroup)
{
- struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
- unsigned long flags;
- struct blkio_group *blkg;
- void *key;
- struct blkio_policy_type *blkiop;
- struct blkio_policy_node *pn, *pntmp;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
- rcu_read_lock();
- do {
- spin_lock_irqsave(&blkcg->lock, flags);
+ spin_lock_irq(&blkcg->lock);
- if (hlist_empty(&blkcg->blkg_list)) {
- spin_unlock_irqrestore(&blkcg->lock, flags);
- break;
+ while (!hlist_empty(&blkcg->blkg_list)) {
+ struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+ struct blkcg_gq, blkcg_node);
+ struct request_queue *q = blkg->q;
+
+ if (spin_trylock(q->queue_lock)) {
+ blkg_destroy(blkg);
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irq(&blkcg->lock);
+ cpu_relax();
+ spin_lock_irq(&blkcg->lock);
}
-
- blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
- blkcg_node);
- key = rcu_dereference(blkg->key);
- __blkiocg_del_blkio_group(blkg);
-
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- /*
- * This blkio_group is being unlinked as associated cgroup is
- * going away. Let all the IO controlling policies know about
- * this event.
- */
- spin_lock(&blkio_list_lock);
- list_for_each_entry(blkiop, &blkio_list, list) {
- if (blkiop->plid != blkg->plid)
- continue;
- blkiop->ops.blkio_unlink_group_fn(key, blkg);
- }
- spin_unlock(&blkio_list_lock);
- } while (1);
-
- list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
- blkio_policy_delete_node(pn);
- kfree(pn);
}
- free_css_id(&blkio_subsys, &blkcg->css);
- rcu_read_unlock();
- if (blkcg != &blkio_root_cgroup)
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
+}
+
+static void blkcg_destroy(struct cgroup *cgroup)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+
+ if (blkcg != &blkcg_root)
kfree(blkcg);
}
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
- struct blkio_cgroup *blkcg;
+ static atomic64_t id_seq = ATOMIC64_INIT(0);
+ struct blkcg *blkcg;
struct cgroup *parent = cgroup->parent;
if (!parent) {
- blkcg = &blkio_root_cgroup;
+ blkcg = &blkcg_root;
goto done;
}
@@ -1582,22 +624,68 @@
if (!blkcg)
return ERR_PTR(-ENOMEM);
- blkcg->weight = BLKIO_WEIGHT_DEFAULT;
+ blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
spin_lock_init(&blkcg->lock);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
INIT_HLIST_HEAD(&blkcg->blkg_list);
- INIT_LIST_HEAD(&blkcg->policy_list);
return &blkcg->css;
}
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing the
+ * blkcg part of the new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
+{
+ might_sleep();
+
+ return blk_throtl_init(q);
+}
+
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue(). Responsible for draining the blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
+{
+ lockdep_assert_held(q->queue_lock);
+
+ blk_throtl_drain(q);
+}
+
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue(). Responsible for exiting the blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_throtl_exit(q);
+}
+
/*
 * We cannot support shared io contexts, as we have no means to support
* two tasks with the same ioc in two different groups without major rework
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
struct io_context *ioc;
@@ -1616,63 +704,213 @@
return ret;
}
-static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
- struct task_struct *task;
- struct io_context *ioc;
-
- cgroup_taskset_for_each(task, cgrp, tset) {
- /* we don't lose anything even if ioc allocation fails */
- ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
- if (ioc) {
- ioc_cgroup_changed(ioc);
- put_io_context(ioc);
- }
- }
-}
-
struct cgroup_subsys blkio_subsys = {
.name = "blkio",
- .create = blkiocg_create,
- .can_attach = blkiocg_can_attach,
- .attach = blkiocg_attach,
- .destroy = blkiocg_destroy,
-#ifdef CONFIG_BLK_CGROUP
- /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
+ .create = blkcg_create,
+ .can_attach = blkcg_can_attach,
+ .pre_destroy = blkcg_pre_destroy,
+ .destroy = blkcg_destroy,
.subsys_id = blkio_subsys_id,
-#endif
- .base_cftypes = blkio_files,
- .use_id = 1,
+ .base_cftypes = blkcg_files,
.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from IO path. Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations.  Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- spin_lock(&blkio_list_lock);
- list_add_tail(&blkiop->list, &blkio_list);
- spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+ LIST_HEAD(pds);
+ struct blkcg_gq *blkg;
+ struct blkg_policy_data *pd, *n;
+ int cnt = 0, ret;
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
+ if (blkcg_policy_enabled(q, pol))
+ return 0;
+
+ blk_queue_bypass_start(q);
+
+ /* make sure the root blkg exists and count the existing blkgs */
+ spin_lock_irq(q->queue_lock);
+
+ rcu_read_lock();
+ blkg = __blkg_lookup_create(&blkcg_root, q);
+ rcu_read_unlock();
+
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ goto out_unlock;
+ }
+ q->root_blkg = blkg;
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node)
+ cnt++;
+
+ spin_unlock_irq(q->queue_lock);
+
+ /* allocate policy_data for all existing blkgs */
+ while (cnt--) {
+ pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ list_add_tail(&pd->alloc_node, &pds);
+ }
+
+ /*
+	 * Install the allocated pds.  With @q bypassed, no new blkg
+ * should have been created while the queue lock was dropped.
+ */
+ spin_lock_irq(q->queue_lock);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (WARN_ON(list_empty(&pds))) {
+ /* umm... this shouldn't happen, just abort */
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+ list_del_init(&pd->alloc_node);
+
+ /* grab blkcg lock too while installing @pd on @blkg */
+ spin_lock(&blkg->blkcg->lock);
+
+ blkg->pd[pol->plid] = pd;
+ pd->blkg = blkg;
+ pol->pd_init_fn(blkg);
+
+ spin_unlock(&blkg->blkcg->lock);
+ }
+
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+out_free:
+ blk_queue_bypass_end(q);
+ list_for_each_entry_safe(pd, n, &pds, alloc_node)
+ kfree(pd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
+
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- spin_lock(&blkio_list_lock);
- list_del_init(&blkiop->list);
- spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+ struct blkcg_gq *blkg;
-static int __init init_cgroup_blkio(void)
+ if (!blkcg_policy_enabled(q, pol))
+ return;
+
+ blk_queue_bypass_start(q);
+ spin_lock_irq(q->queue_lock);
+
+ __clear_bit(pol->plid, q->blkcg_pols);
+
+ /* if no policy is left, no need for blkgs - shoot them down */
+ if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+ blkg_destroy_all(q);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ /* grab blkcg lock too while removing @pd from @blkg */
+ spin_lock(&blkg->blkcg->lock);
+
+ if (pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
+
+ kfree(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+
+ spin_unlock(&blkg->blkcg->lock);
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ blk_queue_bypass_end(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
+
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core. Might sleep and @pol may be modified on
+ * successful registration. Returns 0 on success and -errno on failure.
+ */
+int blkcg_policy_register(struct blkcg_policy *pol)
{
- return cgroup_load_subsys(&blkio_subsys);
-}
+ int i, ret;
-static void __exit exit_cgroup_blkio(void)
+ if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+ return -EINVAL;
+
+ mutex_lock(&blkcg_pol_mutex);
+
+ /* find an empty slot */
+ ret = -ENOSPC;
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (!blkcg_policy[i])
+ break;
+ if (i >= BLKCG_MAX_POLS)
+ goto out_unlock;
+
+ /* register and update blkgs */
+ pol->plid = i;
+ blkcg_policy[i] = pol;
+
+ /* everything is in place, add intf files for the new policy */
+ if (pol->cftypes)
+ WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+ ret = 0;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
+
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol). Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
{
- cgroup_unload_subsys(&blkio_subsys);
-}
+ mutex_lock(&blkcg_pol_mutex);
-module_init(init_cgroup_blkio);
-module_exit(exit_cgroup_blkio);
-MODULE_LICENSE("GPL");
+ if (WARN_ON(blkcg_policy[pol->plid] != pol))
+ goto out_unlock;
+
+ /* kill the intf files first */
+ if (pol->cftypes)
+ cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+
+ /* unregister and update blkgs */
+ blkcg_policy[pol->plid] = NULL;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
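
Taken together, the registration and activation entry points above define how an IO policy plugs into the reworked blkcg core. The sketch below shows one plausible shape for a hypothetical policy module: every "mod_*" name is invented for illustration, and the exact place a policy calls blkcg_activate_policy() from (its per-queue setup) is an assumption, but the blkcg_*() calls and the rule that the private data must begin with struct blkg_policy_data come from this patch (blk-throttle.c below follows the same pattern).

        /* minimal sketch of a hypothetical blkcg policy -- not part of this patch */
        #include <linux/module.h>
        #include <linux/blkdev.h>
        #include "blk-cgroup.h"

        static struct blkcg_policy mod_policy;

        /* per-(blkcg, queue) private data; struct blkg_policy_data must come first */
        struct mod_pd {
                struct blkg_policy_data pd;
                struct blkg_rwstat serviced;    /* IOs seen by this group */
                u64 limit;                      /* an illustrative per-group knob */
        };

        /* called by blkcg core for each blkg once its pd has been allocated */
        static void mod_pd_init(struct blkcg_gq *blkg)
        {
                struct mod_pd *mpd = container_of(blkg->pd[mod_policy.plid],
                                                  struct mod_pd, pd);

                mpd->limit = 0;
                blkg_rwstat_reset(&mpd->serviced);
        }

        static struct blkcg_policy mod_policy = {
                .pd_size        = sizeof(struct mod_pd),
                .pd_init_fn     = mod_pd_init,
                /* .cftypes would list the cgroup files the policy exposes */
        };

        /* module init: grab a policy slot and add the interface files */
        static int __init mod_init(void)
        {
                return blkcg_policy_register(&mod_policy);
        }

        /* per-queue setup/teardown, e.g. from the policy's elevator or queue hook */
        static int mod_init_queue(struct request_queue *q)
        {
                /* populates every existing blkg on @q with a struct mod_pd */
                return blkcg_activate_policy(q, &mod_policy);
        }

        static void mod_exit_queue(struct request_queue *q)
        {
                blkcg_deactivate_policy(q, &mod_policy);
        }

        static void __exit mod_exit(void)
        {
                blkcg_policy_unregister(&mod_policy);
        }

        module_init(mod_init);
        module_exit(mod_exit);
        MODULE_LICENSE("GPL");

The forward declaration of mod_policy mirrors what blk-throttle.c does with blkcg_policy_throtl, so pd_init_fn can reference the policy's plid before the initializer.
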
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6f3ace7..8ac457c 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,350 +15,371 @@
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
-
-enum blkio_policy_id {
- BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
- BLKIO_POLICY_THROTL, /* Throttling */
-};
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+/* CFQ specific, out here for blkcg->cfq_weight */
+#define CFQ_WEIGHT_MIN 10
+#define CFQ_WEIGHT_MAX 1000
+#define CFQ_WEIGHT_DEFAULT 500
-#ifndef CONFIG_BLK_CGROUP
-/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
-extern struct cgroup_subsys blkio_subsys;
-#define blkio_subsys_id blkio_subsys.subsys_id
-#endif
+#ifdef CONFIG_BLK_CGROUP
-enum stat_type {
- /* Total time spent (in ns) between request dispatch to the driver and
- * request completion for IOs doen by this cgroup. This may not be
- * accurate when NCQ is turned on. */
- BLKIO_STAT_SERVICE_TIME = 0,
- /* Total time spent waiting in scheduler queue in ns */
- BLKIO_STAT_WAIT_TIME,
- /* Number of IOs queued up */
- BLKIO_STAT_QUEUED,
- /* All the single valued stats go below this */
- BLKIO_STAT_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- BLKIO_STAT_UNACCOUNTED_TIME,
- BLKIO_STAT_AVG_QUEUE_SIZE,
- BLKIO_STAT_IDLE_TIME,
- BLKIO_STAT_EMPTY_TIME,
- BLKIO_STAT_GROUP_WAIT_TIME,
- BLKIO_STAT_DEQUEUE
-#endif
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
-/* Per cpu stats */
-enum stat_type_cpu {
- BLKIO_STAT_CPU_SECTORS,
- /* Total bytes transferred */
- BLKIO_STAT_CPU_SERVICE_BYTES,
- /* Total IOs serviced, post merge */
- BLKIO_STAT_CPU_SERVICED,
- /* Number of IOs merged */
- BLKIO_STAT_CPU_MERGED,
- BLKIO_STAT_CPU_NR
+struct blkcg_gq;
+
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ /* for policies to test whether associated blkcg has changed */
+ uint64_t id;
+
+ /* TODO: per-policy storage in blkcg */
+ unsigned int cfq_weight; /* belongs to cfq */
};
-enum stat_sub_type {
- BLKIO_STAT_READ = 0,
- BLKIO_STAT_WRITE,
- BLKIO_STAT_SYNC,
- BLKIO_STAT_ASYNC,
- BLKIO_STAT_TOTAL
+struct blkg_stat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt;
};
-/* blkg state flags */
-enum blkg_state_flags {
- BLKG_waiting = 0,
- BLKG_idling,
- BLKG_empty,
+struct blkg_rwstat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt[BLKG_RWSTAT_NR];
};
-/* cgroup files owned by proportional weight policy */
-enum blkcg_file_name_prop {
- BLKIO_PROP_weight = 1,
- BLKIO_PROP_weight_device,
- BLKIO_PROP_io_service_bytes,
- BLKIO_PROP_io_serviced,
- BLKIO_PROP_time,
- BLKIO_PROP_sectors,
- BLKIO_PROP_unaccounted_time,
- BLKIO_PROP_io_service_time,
- BLKIO_PROP_io_wait_time,
- BLKIO_PROP_io_merged,
- BLKIO_PROP_io_queued,
- BLKIO_PROP_avg_queue_size,
- BLKIO_PROP_group_wait_time,
- BLKIO_PROP_idle_time,
- BLKIO_PROP_empty_time,
- BLKIO_PROP_dequeue,
+/*
+ * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size. blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+ /* the blkg this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
};
-/* cgroup files owned by throttle policy */
-enum blkcg_file_name_throtl {
- BLKIO_THROTL_read_bps_device,
- BLKIO_THROTL_write_bps_device,
- BLKIO_THROTL_read_iops_device,
- BLKIO_THROTL_write_iops_device,
- BLKIO_THROTL_io_service_bytes,
- BLKIO_THROTL_io_serviced,
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+ /* reference count */
+ int refcnt;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
};
-struct blkio_cgroup {
- struct cgroup_subsys_state css;
- unsigned int weight;
- spinlock_t lock;
- struct hlist_head blkg_list;
- struct list_head policy_list; /* list of blkio_policy_node */
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+ int plid;
+ /* policy specific private data size */
+ size_t pd_size;
+ /* cgroup files for the policy */
+ struct cftype *cftypes;
+
+ /* operations */
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
-struct blkio_group_stats {
- /* total disk time and nr sectors dispatched by this group */
- uint64_t time;
- uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- uint64_t unaccounted_time;
+extern struct blkcg blkcg_root;
- /* Sum of number of IOs queued across all samples */
- uint64_t avg_queue_size_sum;
- /* Count of samples taken for average */
- uint64_t avg_queue_size_samples;
- /* How many times this group has been removed from service tree */
- unsigned long dequeue;
-
- /* Total time spent waiting for it to be assigned a timeslice. */
- uint64_t group_wait_time;
- uint64_t start_group_wait_time;
-
- /* Time spent idling for this blkio_group */
- uint64_t idle_time;
- uint64_t start_idle_time;
- /*
- * Total time when we have requests queued and do not contain the
- * current active queue.
- */
- uint64_t empty_time;
- uint64_t start_empty_time;
- uint16_t flags;
-#endif
-};
-
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
- uint64_t sectors;
- uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
- struct u64_stats_sync syncp;
-};
-
-struct blkio_group {
- /* An rcu protected unique identifier for the group */
- void *key;
- struct hlist_node blkcg_node;
- unsigned short blkcg_id;
- /* Store cgroup path */
- char path[128];
- /* The device MKDEV(major, minor), this group has been created for */
- dev_t dev;
- /* policy which owns this blk group */
- enum blkio_policy_id plid;
-
- /* Need to serialize the stats in the case of reset/update */
- spinlock_t stats_lock;
- struct blkio_group_stats stats;
- /* Per cpu stats pointer */
- struct blkio_group_stats_cpu __percpu *stats_cpu;
-};
-
-struct blkio_policy_node {
- struct list_head node;
- dev_t dev;
- /* This node belongs to max bw policy or porportional weight policy */
- enum blkio_policy_id plid;
- /* cgroup file to which this rule belongs to */
- int fileid;
-
- union {
- unsigned int weight;
- /*
- * Rate read/write in terms of bytes per second
- * Whether this rate represents read or write is determined
- * by file type "fileid".
- */
- u64 bps;
- unsigned int iops;
- } val;
-};
-
-extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
-
-typedef void (blkio_update_group_weight_fn) (void *key,
- struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
- struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
- struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int write_iops);
-
-struct blkio_policy_ops {
- blkio_unlink_group_fn *blkio_unlink_group_fn;
- blkio_update_group_weight_fn *blkio_update_group_weight_fn;
- blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
- blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
- blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
- blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
-};
-
-struct blkio_policy_type {
- struct list_head list;
- struct blkio_policy_ops ops;
- enum blkio_policy_id plid;
-};
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
+struct blkcg *bio_blkcg(struct bio *bio);
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
-extern void blkio_policy_register(struct blkio_policy_type *);
-extern void blkio_policy_unregister(struct blkio_policy_type *);
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
-static inline char *blkg_path(struct blkio_group *blkg)
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ u64 v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
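
blkg_conf_prep() and blkg_conf_finish() bracket per-device configuration writes: prep parses the user input, resolves the device and its blkg (taking the locks and references it needs), and finish drops them again. A hedged sketch of a write handler for the hypothetical mod_policy from the earlier sketch follows; the handler name, its signature, the "MAJ:MIN VAL" style input and the meaning given to ctx.v are assumptions, only the blkg_conf_*() calls come from this header. A real cgroup file method would obtain @blkcg via cgroup_to_blkcg().

        static int mod_set_limit(struct blkcg *blkcg, const char *buf)
        {
                struct blkg_conf_ctx ctx;
                struct mod_pd *mpd;
                int ret;

                /* resolve @buf to a device's blkg + value; on success @ctx keeps
                 * the needed references/locks until blkg_conf_finish() */
                ret = blkg_conf_prep(blkcg, &mod_policy, buf, &ctx);
                if (ret)
                        return ret;

                mpd = container_of(ctx.blkg->pd[mod_policy.plid],
                                   struct mod_pd, pd);
                mpd->limit = ctx.v;     /* ctx.v is the parsed value */

                blkg_conf_finish(&ctx);
                return 0;
        }
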
+
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
{
- return blkg->path;
+ return blkg ? blkg->pd[pol->plid] : NULL;
}
-#else
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
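
Policies usually wrap these two generic converters so the container_of() boilerplate lives in one place; blk-throttle.c below does exactly this with blkg_to_tg()/tg_to_blkg(). For the hypothetical mod_policy the wrappers might look like this (names invented for illustration):

        static inline struct mod_pd *blkg_to_mpd(struct blkcg_gq *blkg)
        {
                struct blkg_policy_data *pd = blkg_to_pd(blkg, &mod_policy);

                return pd ? container_of(pd, struct mod_pd, pd) : NULL;
        }

        static inline struct blkcg_gq *mpd_to_blkg(struct mod_pd *mpd)
        {
                return pd_to_blkg(&mpd->pd);
        }
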
-struct blkio_group {
-};
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ int ret;
-struct blkio_policy_type {
-};
+ rcu_read_lock();
+ ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ rcu_read_unlock();
+ if (ret)
+ strncpy(buf, "<unavailable>", buflen);
+ return ret;
+}
-static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
-static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding queue_lock and an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(!blkg->refcnt);
+ blkg->refcnt++;
+}
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+void __blkg_release(struct blkcg_gq *blkg);
-#endif
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ *
+ * The caller should be holding queue_lock.
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(blkg->refcnt <= 0);
+ if (!--blkg->refcnt)
+ __blkg_release(blkg);
+}
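
The reference count pins a blkg for as long as a policy has work outstanding against it. Below is a minimal sketch of the pattern this very patch uses in blk-throttle.c (one reference per bio parked on a group); the helper names are illustrative, an additional struct bio_list queued_bios field on struct mod_pd is assumed, and queue_lock is assumed to be held by the caller as required above.

        /* queue_lock held: park @bio on the group and pin its blkg */
        static void mod_queue_bio(struct mod_pd *mpd, struct bio *bio)
        {
                bio_list_add(&mpd->queued_bios, bio);
                blkg_get(mpd_to_blkg(mpd));
        }

        /* queue_lock held: dispatch one parked bio and drop its reference */
        static struct bio *mod_pop_bio(struct mod_pd *mpd)
        {
                struct bio *bio = bio_list_pop(&mpd->queued_bios);

                if (bio)
                        blkg_put(mpd_to_blkg(mpd));
                return bio;
        }
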
-#define BLKIO_WEIGHT_MIN 10
-#define BLKIO_WEIGHT_MAX 1000
-#define BLKIO_WEIGHT_DEFAULT 500
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ u64_stats_update_begin(&stat->syncp);
+ stat->cnt += val;
+ u64_stats_update_end(&stat->syncp);
+}
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat. This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ unsigned int start;
+ uint64_t v;
-#define BLKG_FLAG_FNS(name) \
-static inline void blkio_mark_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags |= (1 << BLKG_##name); \
-} \
-static inline void blkio_clear_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags &= ~(1 << BLKG_##name); \
-} \
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
-{ \
- return (stats->flags & (1 << BLKG_##name)) != 0; \
-} \
+ do {
+ start = u64_stats_fetch_begin(&stat->syncp);
+ v = stat->cnt;
+ } while (u64_stats_fetch_retry(&stat->syncp, start));
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(
- struct blkio_group *blkg) {}
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-#endif
+ return v;
+}
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-extern struct blkio_cgroup blkio_root_cgroup;
-extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid);
-extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
-extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
- void *key);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
- bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync);
-#else
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ stat->cnt = 0;
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
+{
+ u64_stats_update_begin(&rwstat->syncp);
+
+ if (rw & REQ_WRITE)
+ rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ if (rw & REQ_SYNC)
+ rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+ u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it as the return value.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ unsigned int start;
+ struct blkg_rwstat tmp;
+
+ do {
+ start = u64_stats_fetch_begin(&rwstat->syncp);
+ tmp = *rwstat;
+ } while (u64_stats_fetch_retry(&rwstat->syncp, start));
+
+ return tmp;
+}
+
+/**
+ * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
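
These stat helpers are what a policy updates in its IO path and later folds back into numbers for its cgroup files, normally through blkcg_print_blkgs() and a prfill callback (tg_prfill_cpu_rwstat in blk-throttle.c below is the per-cpu variant). A simplified, non-per-cpu sketch for the hypothetical mod_policy; the offsetof-based addressing is what blkg_prfill_rwstat() expects, everything else is invented. In a full policy mod_print_serviced() would be wired up through the policy's cftypes, with @blkcg obtained via cgroup_to_blkcg().

        /* IO path: account one bio against its group; callers serialize per
         * group as required by blkg_rwstat_add() */
        static void mod_account_bio(struct mod_pd *mpd, struct bio *bio)
        {
                blkg_rwstat_add(&mpd->serviced, bio->bi_rw, 1);
        }

        /* cgroup file read: print the per-device rwstat for every blkg of @blkcg */
        static int mod_print_serviced(struct blkcg *blkcg, struct seq_file *sf)
        {
                blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &mod_policy,
                                  offsetof(struct mod_pd, serviced), true);
                return 0;
        }
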
+
+#else /* CONFIG_BLK_CGROUP */
+
struct cgroup;
-static inline struct blkio_cgroup *
-cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
-static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
-static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid) {}
+struct blkg_policy_data {
+};
-static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
+struct blkcg_gq {
+};
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
+struct blkcg_policy {
+};
-static inline struct blkio_group *
-blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time)
-{}
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction,
- bool sync) {}
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-#endif
-#endif /* _BLK_CGROUP_H */
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-core.c b/block/blk-core.c
index 1f61b74..3c923a7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -29,11 +29,13 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +282,7 @@
*
* This function does not cancel any asynchronous activity arising
 * out of elevator or throttling code. That would require elevator_exit()
- * and blk_throtl_exit() to be called with queue lock initialized.
+ * and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)
@@ -365,17 +367,23 @@
spin_lock_irq(q->queue_lock);
- elv_drain_elevator(q);
- if (drain_all)
- blk_throtl_drain(q);
+ /*
+ * The caller might be trying to drain @q before its
+ * elevator is initialized.
+ */
+ if (q->elevator)
+ elv_drain_elevator(q);
+
+ blkcg_drain_queue(q);
/*
* This function might be called on a queue which failed
- * driver init after queue creation. Some drivers
- * (e.g. fd) get unhappy in such cases. Kick queue iff
- * dispatch queue has something on it.
+ * driver init after queue creation or is not yet fully
+	 * active.  Some drivers (e.g. fd and loop) get unhappy
+ * in such cases. Kick queue iff dispatch queue has
+ * something on it and @q has request_fn set.
*/
- if (!list_empty(&q->queue_head))
+ if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
drain |= q->rq.elvpriv;
@@ -403,6 +411,49 @@
}
/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used. This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
+ * inside queue or RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (drain) {
+ blk_drain_queue(q, false);
+ /* ensure blk_queue_bypass() is %true inside RCU read lock */
+ synchronize_rcu();
+ }
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth)
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
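
Bypass start/end nest via bypass_depth, so they are always used as a bracketing pair around work that must not race with normal request queueing; blkcg_activate_policy() above is the in-tree example. A hedged sketch of the same pattern for a hypothetical caller that needs all throttled/ELVPRIV requests drained while it rebuilds per-queue state:

        static void mod_update_queue(struct request_queue *q)
        {
                /* only the dispatch FIFO is used from here on; existing
                 * throttled/ELVPRIV requests are drained before we return */
                blk_queue_bypass_start(q);

                /* ... safely rebuild per-queue policy state here ... */

                /* restore normal queueing; nesting with other bypass users is fine */
                blk_queue_bypass_end(q);
        }
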
+
+/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
@@ -418,6 +469,19 @@
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
spin_lock_irq(lock);
+
+ /*
+ * Dead queue is permanently in bypass mode till released. Note
+ * that, unlike blk_queue_bypass_start(), we aren't performing
+ * synchronize_rcu() after entering bypass mode to avoid the delay
+ * as some drivers create and destroy a lot of queues while
+ * probing. This is still safe because blk_release_queue() will be
+ * called only after the queue refcnt drops to zero and nothing,
+ * RCU or not, would be traversing the queue by then.
+ */
+ q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -428,13 +492,8 @@
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /*
- * Drain all requests queued before DEAD marking. The caller might
- * be trying to tear down @q before its elevator is initialized, in
- * which case we don't want to call into draining.
- */
- if (q->elevator)
- blk_drain_queue(q, true);
+ /* drain all requests queued before DEAD marking */
+ blk_drain_queue(q, true);
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -498,14 +557,15 @@
if (err)
goto fail_id;
- if (blk_throtl_init(q))
- goto fail_id;
-
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+ INIT_LIST_HEAD(&q->blkg_list);
+#endif
INIT_LIST_HEAD(&q->flush_queue[0]);
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,6 +582,18 @@
*/
q->queue_lock = &q->__queue_lock;
+ /*
+ * A queue starts its life with bypass turned on to avoid
+ * unnecessary bypass on/off overhead and nasty surprises during
+ * init. The initial bypass will be finished at the end of
+ * blk_init_allocated_queue().
+ */
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ if (blkcg_init_queue(q))
+ goto fail_id;
+
return q;
fail_id:
@@ -614,15 +686,15 @@
q->sg_reserved_size = INT_MAX;
- /*
- * all done
- */
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ /* init elevator */
+ if (elevator_init(q, NULL))
+ return NULL;
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ /* all done, end the initial bypass */
+ blk_queue_bypass_end(q);
+ return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -648,33 +720,6 @@
mempool_free(rq, q->rq.rq_pool);
}
-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
- unsigned int flags, gfp_t gfp_mask)
-{
- struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
- if (!rq)
- return NULL;
-
- blk_rq_init(q, rq);
-
- rq->cmd_flags = flags | REQ_ALLOCED;
-
- if (flags & REQ_ELVPRIV) {
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, gfp_mask))) {
- mempool_free(rq, q->rq.rq_pool);
- return NULL;
- }
- /* @rq->elv.icq holds on to io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-
- return rq;
-}
-
/*
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
@@ -763,6 +808,22 @@
}
/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio. May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+ if (bio && bio->bi_ioc)
+ return bio->bi_ioc;
+#endif
+ return current->io_context;
+}
+
+/**
* get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
@@ -779,7 +840,7 @@
static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
- struct request *rq = NULL;
+ struct request *rq;
struct request_list *rl = &q->rq;
struct elevator_type *et;
struct io_context *ioc;
@@ -789,7 +850,7 @@
int may_queue;
retry:
et = q->elevator->type;
- ioc = current->io_context;
+ ioc = rq_ioc(bio);
if (unlikely(blk_queue_dead(q)))
return NULL;
@@ -808,7 +869,7 @@
*/
if (!ioc && !retried) {
spin_unlock_irq(q->queue_lock);
- create_io_context(current, gfp_mask, q->node);
+ create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
retried = true;
goto retry;
@@ -831,7 +892,7 @@
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
- goto out;
+ return NULL;
}
}
}
@@ -844,7 +905,7 @@
* allocated with any setting of ->nr_requests
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- goto out;
+ return NULL;
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
@@ -859,8 +920,7 @@
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
- if (blk_rq_should_init_elevator(bio) &&
- !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
rl->elvpriv++;
if (et->icq_cache && ioc)
@@ -871,41 +931,36 @@
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
- /* create icq if missing */
- if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
- icq = ioc_create_icq(q, gfp_mask);
- if (!icq)
- goto fail_icq;
+ /* allocate and init request */
+ rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+ if (!rq)
+ goto fail_alloc;
+
+ blk_rq_init(q, rq);
+ rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+ /* init elvpriv */
+ if (rw_flags & REQ_ELVPRIV) {
+ if (unlikely(et->icq_cache && !icq)) {
+ create_io_context(gfp_mask, q->node);
+ ioc = rq_ioc(bio);
+ if (!ioc)
+ goto fail_elvpriv;
+
+ icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (!icq)
+ goto fail_elvpriv;
+ }
+
+ rq->elv.icq = icq;
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+ goto fail_elvpriv;
+
+ /* @rq->elv.icq holds io_context until @rq is freed */
+ if (icq)
+ get_io_context(icq->ioc);
}
-
- rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
-
-fail_icq:
- if (unlikely(!rq)) {
- /*
- * Allocation failed presumably due to memory. Undo anything
- * we might have messed up.
- *
- * Allocating task should really be put onto the front of the
- * wait queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(q, rw_flags);
-
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved
- * so that freeing of a request in the other direction will
- * notice us. another possible fix would be to split the
- * rq mempool into READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
-
- goto out;
- }
-
+out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
@@ -916,8 +971,48 @@
ioc->nr_batch_requests--;
trace_block_getrq(q, bio, rw_flags & 1);
-out:
return rq;
+
+fail_elvpriv:
+ /*
+ * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
+ * and may fail indefinitely under memory pressure and thus
+ * shouldn't stall IO. Treat this request as !elvpriv. This will
+	 * disturb iosched and blkcg but weird is better than dead.
+ */
+ printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+ dev_name(q->backing_dev_info.dev));
+
+ rq->cmd_flags &= ~REQ_ELVPRIV;
+ rq->elv.icq = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ rl->elvpriv--;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+
+fail_alloc:
+ /*
+ * Allocation failed presumably due to memory. Undo anything we
+ * might have messed up.
+ *
+ * Allocating task should really be put onto the front of the wait
+ * queue, but this is pretty rare.
+ */
+ spin_lock_irq(q->queue_lock);
+ freed_request(q, rw_flags);
+
+ /*
+	 * In the very unlikely event that allocation failed and no
+	 * requests for this direction were pending, mark us starved so that
+	 * freeing of a request in the other direction will notice
+	 * us. Another possible fix would be to split the rq mempool into
+ * READ and WRITE
+ */
+rq_starved:
+ if (unlikely(rl->count[is_sync] == 0))
+ rl->starved[is_sync] = 1;
+ return NULL;
}
/**
@@ -961,7 +1056,7 @@
* up to a big batch of them for a small period time.
* See ioc_batching, ioc_set_batching
*/
- create_io_context(current, GFP_NOIO, q->node);
+ create_io_context(GFP_NOIO, q->node);
ioc_set_batching(q, current->io_context);
spin_lock_irq(q->queue_lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2..1e2d53b 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -155,20 +155,20 @@
}
EXPORT_SYMBOL(put_io_context);
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
{
- struct io_context *ioc;
- struct io_cq *icq;
struct hlist_node *n;
unsigned long flags;
+ struct io_cq *icq;
- task_lock(task);
- ioc = task->io_context;
- task->io_context = NULL;
- task_unlock(task);
-
- if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+ if (!atomic_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}
@@ -197,6 +197,20 @@
put_io_context(ioc);
}
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+ struct io_context *ioc;
+
+ task_lock(task);
+ ioc = task->io_context;
+ task->io_context = NULL;
+ task_unlock(task);
+
+ atomic_dec(&ioc->nr_tasks);
+ put_io_context_active(ioc);
+}
+
/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared
@@ -218,19 +232,18 @@
}
}
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
- int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
struct io_context *ioc;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
- return;
+ return -ENOMEM;
/* initialize */
atomic_long_set(&ioc->refcount, 1);
- atomic_set(&ioc->nr_tasks, 1);
+ atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);
@@ -250,6 +263,8 @@
else
kmem_cache_free(iocontext_cachep, ioc);
task_unlock(task);
+
+ return 0;
}
/**
@@ -281,7 +296,7 @@
return ioc;
}
task_unlock(task);
- } while (create_io_context(task, gfp_flags, node));
+ } while (!create_task_io_context(task, gfp_flags, node));
return NULL;
}
@@ -325,26 +340,23 @@
/**
* ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
* @q: request_queue of interest
* @gfp_mask: allocation mask
*
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
+ * will be created using @gfp_mask.
*
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
- struct io_context *ioc;
struct io_cq *icq;
/* allocate stuff */
- ioc = create_io_context(current, gfp_mask, q->node);
- if (!ioc)
- return NULL;
-
icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
q->node);
if (!icq)
@@ -382,74 +394,6 @@
return icq;
}
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
- struct io_cq *icq;
- struct hlist_node *n;
-
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
- icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
- * icq's. iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc->ioprio = ioprio;
- ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
- unsigned int changed = 0;
- unsigned long flags;
-
- if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
- spin_lock_irqsave(&icq->ioc->lock, flags);
- changed = icq->flags & ICQ_CHANGED_MASK;
- icq->flags &= ~ICQ_CHANGED_MASK;
- spin_unlock_irqrestore(&icq->ioc->lock, flags);
- }
- return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf15001..aa41b47 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
#include <linux/blktrace_api.h>
#include "blk.h"
+#include "blk-cgroup.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -479,6 +480,8 @@
blk_sync_queue(q);
+ blkcg_exit_queue(q);
+
if (q->elevator) {
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
@@ -486,15 +489,12 @@
elevator_exit(q->elevator);
}
- blk_throtl_exit(q);
-
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
if (q->queue_tags)
__blk_queue_free_tags(q);
- blk_throtl_release(q);
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f2ddb94..5b06595 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,6 +21,8 @@
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
+static struct blkcg_policy blkcg_policy_throtl;
+
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
@@ -38,9 +40,17 @@
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+};
+
struct throtl_grp {
- /* List of throtl groups on the request queue*/
- struct hlist_node tg_node;
+ /* must be the first member */
+ struct blkg_policy_data pd;
/* active throtl group service_tree member */
struct rb_node rb_node;
@@ -52,8 +62,6 @@
*/
unsigned long disptime;
- struct blkio_group blkg;
- atomic_t ref;
unsigned int flags;
/* Two lists for READ and WRITE */
@@ -80,18 +88,18 @@
/* Some throttle limits got updated for the group */
int limits_changed;
- struct rcu_head rcu_head;
+ /* Per cpu stats pointer */
+ struct tg_stats_cpu __percpu *stats_cpu;
+
+ /* List of tgs waiting for per cpu stats memory to be allocated */
+ struct list_head stats_alloc_node;
};
struct throtl_data
{
- /* List of throtl groups */
- struct hlist_head tg_list;
-
/* service tree for active throtl groups */
struct throtl_rb_root tg_service_tree;
- struct throtl_grp *root_tg;
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
@@ -108,6 +116,33 @@
int limits_changed;
};
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
+
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
+{
+ return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
+
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
+{
+ return pd_to_blkg(&tg->pd);
+}
+
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+ return blkg_to_tg(td->queue->root_blkg);
+}
+
enum tg_state_flags {
THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
};
@@ -128,244 +163,150 @@
THROTL_TG_FNS(on_rr);
-#define throtl_log_tg(td, tg, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
- blkg_path(&(tg)->blkg), ##args); \
+#define throtl_log_tg(td, tg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
#define throtl_log(td, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
-{
- if (blkg)
- return container_of(blkg, struct throtl_grp, blkg);
-
- return NULL;
-}
-
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
return td->nr_queued[0] + td->nr_queued[1];
}
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
+/*
+ * Worker for allocating per cpu stat for tgs. This is scheduled on the
+ * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
{
- atomic_inc(&tg->ref);
- return tg;
+ static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
+ struct delayed_work *dwork = to_delayed_work(work);
+ bool empty = false;
+
+alloc_stats:
+ if (!stats_cpu) {
+ stats_cpu = alloc_percpu(struct tg_stats_cpu);
+ if (!stats_cpu) {
+ /* allocation failed, try again after some time */
+ queue_delayed_work(system_nrt_wq, dwork,
+ msecs_to_jiffies(10));
+ return;
+ }
+ }
+
+ spin_lock_irq(&tg_stats_alloc_lock);
+
+ if (!list_empty(&tg_stats_alloc_list)) {
+ struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+ struct throtl_grp,
+ stats_alloc_node);
+ swap(tg->stats_cpu, stats_cpu);
+ list_del_init(&tg->stats_alloc_node);
+ }
+
+ empty = list_empty(&tg_stats_alloc_list);
+ spin_unlock_irq(&tg_stats_alloc_lock);
+ if (!empty)
+ goto alloc_stats;
}
-static void throtl_free_tg(struct rcu_head *head)
+static void throtl_pd_init(struct blkcg_gq *blkg)
{
- struct throtl_grp *tg;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
- tg = container_of(head, struct throtl_grp, rcu_head);
- free_percpu(tg->blkg.stats_cpu);
- kfree(tg);
-}
-
-static void throtl_put_tg(struct throtl_grp *tg)
-{
- BUG_ON(atomic_read(&tg->ref) <= 0);
- if (!atomic_dec_and_test(&tg->ref))
- return;
-
- /*
- * A group is freed in rcu manner. But having an rcu lock does not
- * mean that one can access all the fields of blkg and assume these
- * are valid. For example, don't try to follow throtl_data and
- * request queue links.
- *
- * Having a reference to blkg under an rcu allows acess to only
- * values local to groups like group stats and group rate limits
- */
- call_rcu(&tg->rcu_head, throtl_free_tg);
-}
-
-static void throtl_init_group(struct throtl_grp *tg)
-{
- INIT_HLIST_NODE(&tg->tg_node);
RB_CLEAR_NODE(&tg->rb_node);
bio_list_init(&tg->bio_lists[0]);
bio_list_init(&tg->bio_lists[1]);
tg->limits_changed = false;
- /* Practically unlimited BW */
- tg->bps[0] = tg->bps[1] = -1;
- tg->iops[0] = tg->iops[1] = -1;
+ tg->bps[READ] = -1;
+ tg->bps[WRITE] = -1;
+ tg->iops[READ] = -1;
+ tg->iops[WRITE] = -1;
/*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * request queue which will be dropped by either request queue
- * exit or cgroup deletion path depending on who is exiting first.
+ * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+ * but percpu allocator can't be called from IO path. Queue tg on
+ * tg_stats_alloc_list and allocate from work item.
*/
- atomic_set(&tg->ref, 1);
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+ list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+ queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
-/* Should be called with rcu read lock held (needed for blkcg) */
-static void
-throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
{
- hlist_add_head(&tg->tg_node, &td->tg_list);
- td->nr_undestroyed_grps++;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+ list_del_init(&tg->stats_alloc_node);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
+
+ free_percpu(tg->stats_cpu);
}
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
- struct backing_dev_info *bdi = &td->queue->backing_dev_info;
- unsigned int major, minor;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ int cpu;
- if (!tg || tg->blkg.dev)
+ if (tg->stats_cpu == NULL)
return;
- /*
- * Fill in device details for a group which might not have been
- * filled at group creation time as queue was being instantiated
- * and driver had not attached a device yet
- */
- if (bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- tg->blkg.dev = MKDEV(major, minor);
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+
+ blkg_rwstat_reset(&sc->service_bytes);
+ blkg_rwstat_reset(&sc->serviced);
}
}
-/*
- * Should be called with without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during life time of a group
- * if need be
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
- if (!tg || tg->blkg.dev)
- return;
-
- spin_lock_irq(td->queue->queue_lock);
- __throtl_tg_fill_dev_details(td, tg);
- spin_unlock_irq(td->queue->queue_lock);
-}
-
-static void throtl_init_add_tg_lists(struct throtl_data *td,
- struct throtl_grp *tg, struct blkio_cgroup *blkcg)
-{
- __throtl_tg_fill_dev_details(td, tg);
-
- /* Add group onto cgroup list */
- blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
- tg->blkg.dev, BLKIO_POLICY_THROTL);
-
- tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
- tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
- tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
- tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
-
- throtl_add_group_to_td_list(td, tg);
-}
-
-/* Should be called without queue lock and outside of rcu period */
-static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
-{
- struct throtl_grp *tg = NULL;
- int ret;
-
- tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
- if (!tg)
- return NULL;
-
- ret = blkio_alloc_blkg_stats(&tg->blkg);
-
- if (ret) {
- kfree(tg);
- return NULL;
- }
-
- throtl_init_group(tg);
- return tg;
-}
-
-static struct
-throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
-{
- struct throtl_grp *tg = NULL;
- void *key = td;
-
/*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- tg = td->root_tg;
- else
- tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
+ */
+ if (blkcg == &blkcg_root)
+ return td_root_tg(td);
- __throtl_tg_fill_dev_details(td, tg);
- return tg;
+ return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
- struct throtl_grp *tg = NULL, *__tg = NULL;
- struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
-
- /* no throttling for dead queue */
- if (unlikely(blk_queue_dead(q)))
- return NULL;
-
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
- if (tg) {
- rcu_read_unlock();
- return tg;
- }
+ struct throtl_grp *tg = NULL;
/*
- * Need to allocate a group. Allocation of group also needs allocation
- * of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
*/
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
+ if (blkcg == &blkcg_root) {
+ tg = td_root_tg(td);
+ } else {
+ struct blkcg_gq *blkg;
- tg = throtl_alloc_tg(td);
+ blkg = blkg_lookup_create(blkcg, q);
- /* Group allocated and queue is still alive. take the lock */
- spin_lock_irq(q->queue_lock);
-
- /* Make sure @q is still alive */
- if (unlikely(blk_queue_dead(q))) {
- kfree(tg);
- return NULL;
+		/* if lookup/creation failed and @q is alive, fall back to root_tg */
+ if (!IS_ERR(blkg))
+ tg = blkg_to_tg(blkg);
+ else if (!blk_queue_dead(q))
+ tg = td_root_tg(td);
}
- /*
- * Initialize the new group. After sleeping, read the blkcg again.
- */
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
-
- /*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
- */
- __tg = throtl_find_tg(td, blkcg);
-
- if (__tg) {
- kfree(tg);
- rcu_read_unlock();
- return __tg;
- }
-
- /* Group allocation failed. Account the IO to root group */
- if (!tg) {
- tg = td->root_tg;
- return tg;
- }
-
- throtl_init_add_tg_lists(td, tg, blkcg);
- rcu_read_unlock();
return tg;
}
@@ -734,16 +675,41 @@
return 0;
}
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+ int rw)
+{
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct tg_stats_cpu *stats_cpu;
+ unsigned long flags;
+
+ /* If per cpu stats are not allocated yet, don't do any accounting. */
+ if (tg->stats_cpu == NULL)
+ return;
+
+ /*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on the same CPU. It probably is not needed for 64-bit.
+	 * Not optimizing that case yet.
+ */
+ local_irq_save(flags);
+
+ stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+ blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+ blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+ local_irq_restore(flags);
+}
+
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
- bool sync = rw_is_sync(bio->bi_rw);
/* Charge the bio to the group */
tg->bytes_disp[rw] += bio->bi_size;
tg->io_disp[rw]++;
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+ throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -753,7 +719,7 @@
bio_list_add(&tg->bio_lists[rw], bio);
/* Take a bio reference on tg */
- throtl_ref_get_tg(tg);
+ blkg_get(tg_to_blkg(tg));
tg->nr_queued[rw]++;
td->nr_queued[rw]++;
throtl_enqueue_tg(td, tg);
@@ -786,8 +752,8 @@
bio = bio_list_pop(&tg->bio_lists[rw]);
tg->nr_queued[rw]--;
- /* Drop bio reference on tg */
- throtl_put_tg(tg);
+ /* Drop bio reference on blkg */
+ blkg_put(tg_to_blkg(tg));
BUG_ON(td->nr_queued[rw] <= 0);
td->nr_queued[rw]--;
@@ -865,8 +831,8 @@
static void throtl_process_limit_change(struct throtl_data *td)
{
- struct throtl_grp *tg;
- struct hlist_node *pos, *n;
+ struct request_queue *q = td->queue;
+ struct blkcg_gq *blkg, *n;
if (!td->limits_changed)
return;
@@ -875,7 +841,9 @@
throtl_log(td, "limits changed");
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+
if (!tg->limits_changed)
continue;
@@ -973,119 +941,158 @@
}
}
-static void
-throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&tg->tg_node));
+ struct throtl_grp *tg = pd_to_tg(pd);
+ struct blkg_rwstat rwstat = { }, tmp;
+ int i, cpu;
- hlist_del_init(&tg->tg_node);
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- throtl_put_tg(tg);
- td->nr_undestroyed_grps--;
-}
-
-static void throtl_release_tgs(struct throtl_data *td)
-{
- struct hlist_node *pos, *n;
- struct throtl_grp *tg;
-
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!blkiocg_del_blkio_group(&tg->blkg))
- throtl_destroy_tg(td, tg);
+ tmp = blkg_rwstat_read((void *)sc + off);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ rwstat.cnt[i] += tmp.cnt[i];
}
+
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we are
- * rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if queue was going away, cgroup deltion
- * path got to it first.
- */
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- unsigned long flags;
- struct throtl_data *td = key;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
- spin_lock_irqsave(td->queue->queue_lock, flags);
- throtl_destroy_tg(td, tg_of_blkg(blkg));
- spin_unlock_irqrestore(td->queue->queue_lock, flags);
+ blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
+ cft->private, true);
+ return 0;
}
-static void throtl_update_blkio_group_common(struct throtl_data *td,
- struct throtl_grp *tg)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
+ struct throtl_grp *tg = pd_to_tg(pd);
+ u64 v = *(u64 *)((void *)tg + off);
+
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
+}
+
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ struct throtl_grp *tg = pd_to_tg(pd);
+ unsigned int v = *(unsigned int *)((void *)tg + off);
+
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
+}
+
+static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+ &blkcg_policy_throtl, cft->private, false);
+ return 0;
+}
+
+static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+ &blkcg_policy_throtl, cft->private, false);
+ return 0;
+}
+
+static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
+ bool is_u64)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkg_conf_ctx ctx;
+ struct throtl_grp *tg;
+ struct throtl_data *td;
+ int ret;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+ if (ret)
+ return ret;
+
+ tg = blkg_to_tg(ctx.blkg);
+ td = ctx.blkg->q->td;
+
+ if (!ctx.v)
+ ctx.v = -1;
+
+ if (is_u64)
+ *(u64 *)((void *)tg + cft->private) = ctx.v;
+ else
+ *(unsigned int *)((void *)tg + cft->private) = ctx.v;
+
+ /* XXX: we don't need the following deferred processing */
xchg(&tg->limits_changed, true);
xchg(&td->limits_changed, true);
- /* Schedule a work now to process the limit change */
throtl_schedule_delayed_work(td, 0);
+
+ blkg_conf_finish(&ctx);
+ return 0;
}
-/*
- * For all update functions, key should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(void *key,
- struct blkio_group *blkg, u64 read_bps)
+static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[READ] = read_bps;
- throtl_update_blkio_group_common(td, tg);
+ return tg_set_conf(cgrp, cft, buf, true);
}
-static void throtl_update_blkio_group_write_bps(void *key,
- struct blkio_group *blkg, u64 write_bps)
+static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[WRITE] = write_bps;
- throtl_update_blkio_group_common(td, tg);
+ return tg_set_conf(cgrp, cft, buf, false);
}
-static void throtl_update_blkio_group_read_iops(void *key,
- struct blkio_group *blkg, unsigned int read_iops)
-{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->iops[READ] = read_iops;
- throtl_update_blkio_group_common(td, tg);
-}
-
-static void throtl_update_blkio_group_write_iops(void *key,
- struct blkio_group *blkg, unsigned int write_iops)
-{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->iops[WRITE] = write_iops;
- throtl_update_blkio_group_common(td, tg);
-}
+static struct cftype throtl_files[] = {
+ {
+ .name = "throttle.read_bps_device",
+ .private = offsetof(struct throtl_grp, bps[READ]),
+ .read_seq_string = tg_print_conf_u64,
+ .write_string = tg_set_conf_u64,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.write_bps_device",
+ .private = offsetof(struct throtl_grp, bps[WRITE]),
+ .read_seq_string = tg_print_conf_u64,
+ .write_string = tg_set_conf_u64,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.read_iops_device",
+ .private = offsetof(struct throtl_grp, iops[READ]),
+ .read_seq_string = tg_print_conf_uint,
+ .write_string = tg_set_conf_uint,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.write_iops_device",
+ .private = offsetof(struct throtl_grp, iops[WRITE]),
+ .read_seq_string = tg_print_conf_uint,
+ .write_string = tg_set_conf_uint,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.io_service_bytes",
+ .private = offsetof(struct tg_stats_cpu, service_bytes),
+ .read_seq_string = tg_print_cpu_rwstat,
+ },
+ {
+ .name = "throttle.io_serviced",
+ .private = offsetof(struct tg_stats_cpu, serviced),
+ .read_seq_string = tg_print_cpu_rwstat,
+ },
+ { } /* terminate */
+};
static void throtl_shutdown_wq(struct request_queue *q)
{
@@ -1094,19 +1101,13 @@
cancel_delayed_work_sync(&td->throtl_work);
}
-static struct blkio_policy_type blkio_policy_throtl = {
- .ops = {
- .blkio_unlink_group_fn = throtl_unlink_blkio_group,
- .blkio_update_group_read_bps_fn =
- throtl_update_blkio_group_read_bps,
- .blkio_update_group_write_bps_fn =
- throtl_update_blkio_group_write_bps,
- .blkio_update_group_read_iops_fn =
- throtl_update_blkio_group_read_iops,
- .blkio_update_group_write_iops_fn =
- throtl_update_blkio_group_write_iops,
- },
- .plid = BLKIO_POLICY_THROTL,
+static struct blkcg_policy blkcg_policy_throtl = {
+ .pd_size = sizeof(struct throtl_grp),
+ .cftypes = throtl_files,
+
+ .pd_init_fn = throtl_pd_init,
+ .pd_exit_fn = throtl_pd_exit,
+ .pd_reset_stats_fn = throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
@@ -1114,7 +1115,7 @@
struct throtl_data *td = q->td;
struct throtl_grp *tg;
bool rw = bio_data_dir(bio), update_disptime = true;
- struct blkio_cgroup *blkcg;
+ struct blkcg *blkcg;
bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
@@ -1122,33 +1123,31 @@
goto out;
}
+ /* bio_associate_current() needs ioc, try creating */
+ create_io_context(GFP_ATOMIC, q->node);
+
/*
* A throtl_grp pointer retrieved under rcu can be used to access
* basic fields like stats and io rates. If a group has no rules,
* just update the dispatch stats in lockless manner and return.
*/
-
rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
+ blkcg = bio_blkcg(bio);
+ tg = throtl_lookup_tg(td, blkcg);
if (tg) {
- throtl_tg_fill_dev_details(td, tg);
-
if (tg_no_rule_group(tg, rw)) {
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
- rw, rw_is_sync(bio->bi_rw));
- rcu_read_unlock();
- goto out;
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_size, bio->bi_rw);
+ goto out_unlock_rcu;
}
}
- rcu_read_unlock();
/*
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
spin_lock_irq(q->queue_lock);
- tg = throtl_get_tg(td);
+ tg = throtl_lookup_create_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;
@@ -1189,6 +1188,7 @@
tg->io_disp[rw], tg->iops[rw],
tg->nr_queued[READ], tg->nr_queued[WRITE]);
+ bio_associate_current(bio);
throtl_add_bio_tg(q->td, tg, bio);
throttled = true;
@@ -1199,6 +1199,8 @@
out_unlock:
spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+ rcu_read_unlock();
out:
return throttled;
}
@@ -1241,79 +1243,31 @@
int blk_throtl_init(struct request_queue *q)
{
struct throtl_data *td;
- struct throtl_grp *tg;
+ int ret;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
if (!td)
return -ENOMEM;
- INIT_HLIST_HEAD(&td->tg_list);
td->tg_service_tree = THROTL_RB_ROOT;
td->limits_changed = false;
INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
- /* alloc and Init root group. */
- td->queue = q;
- tg = throtl_alloc_tg(td);
-
- if (!tg) {
- kfree(td);
- return -ENOMEM;
- }
-
- td->root_tg = tg;
-
- rcu_read_lock();
- throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
- rcu_read_unlock();
-
- /* Attach throtl data to request queue */
q->td = td;
- return 0;
+ td->queue = q;
+
+ /* activate policy */
+ ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+ if (ret)
+ kfree(td);
+ return ret;
}
void blk_throtl_exit(struct request_queue *q)
{
- struct throtl_data *td = q->td;
- bool wait = false;
-
- BUG_ON(!td);
-
+ BUG_ON(!q->td);
throtl_shutdown_wq(q);
-
- spin_lock_irq(q->queue_lock);
- throtl_release_tgs(td);
-
- /* If there are other groups */
- if (td->nr_undestroyed_grps > 0)
- wait = true;
-
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Wait for tg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other undestroyed groups out
- * there (other than root group). This can happen if cgroup deletion
- * path claimed the responsibility of cleaning up a group before
- * queue cleanup code get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
- /*
- * Just being safe to make sure after previous flush if some body did
- * update limits through cgroup and another work got queued, cancel
- * it.
- */
- throtl_shutdown_wq(q);
-}
-
-void blk_throtl_release(struct request_queue *q)
-{
+ blkcg_deactivate_policy(q, &blkcg_policy_throtl);
kfree(q->td);
}
@@ -1323,8 +1277,7 @@
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
- blkio_policy_register(&blkio_policy_throtl);
- return 0;
+ return blkcg_policy_register(&blkcg_policy_throtl);
}
module_init(throtl_init);
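
The hunks above convert blk-throttle from the old blkio_policy_type callbacks to the shared blkcg_policy interface: per-group state sits behind a leading struct blkg_policy_data member, the policy declares its pd_size and pd_* callbacks, and blkcg_policy_register()/blkcg_activate_policy() take care of registration and per-queue allocation. The sketch below is illustrative only and not part of the patch; the example_* names are made up, while the types and helpers (blkcg_policy, blkg_policy_data, blkg_to_pd(), blkcg_policy_register()) are the ones used above.

#include <linux/module.h>
#include "blk-cgroup.h"		/* in-tree header included by the files above */

static struct blkcg_policy blkcg_policy_example;	/* forward declaration */

struct example_grp {
	struct blkg_policy_data pd;	/* must be the first member */
	u64 bps_limit;			/* illustrative per-group setting */
};

static inline struct example_grp *blkg_to_example(struct blkcg_gq *blkg)
{
	struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_example);

	return pd ? container_of(pd, struct example_grp, pd) : NULL;
}

static void example_pd_init(struct blkcg_gq *blkg)
{
	/* -1 as the "unlimited" sentinel, mirroring the throttle code above */
	blkg_to_example(blkg)->bps_limit = -1;
}

static struct blkcg_policy blkcg_policy_example = {
	.pd_size	= sizeof(struct example_grp),
	.pd_init_fn	= example_pd_init,
};

static int __init example_init(void)
{
	/* register once at boot; each request_queue activates it separately */
	return blkcg_policy_register(&blkcg_policy_example);
}
module_init(example_init);
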
diff --git a/block/blk.h b/block/blk.h
index d45be87..85f6ae4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@
int blk_dev_init(void);
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
/*
* Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@
*/
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
- int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
* create_io_context - try to create task->io_context
- * @task: target task
* @gfp_mask: allocation mask
* @node: allocation node
*
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it. Returns %current->io_context, which may be %NULL if allocation
+ * failed.
*
* Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
*/
-static inline struct io_context *create_io_context(struct task_struct *task,
- gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
WARN_ON_ONCE(irqs_disabled());
- if (unlikely(!task->io_context))
- create_io_context_slowpath(task, gfp_mask, node);
- return task->io_context;
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, gfp_mask, node);
+ return current->io_context;
}
/*
@@ -222,7 +218,6 @@
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
@@ -231,7 +226,6 @@
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#endif /* BLK_INTERNAL_H */
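
The blk.h hunk above also drops the @task argument from create_io_context(): the helper now always operates on %current, matching the new call in blk_throtl_bio() where the io_context is created up front so that bio_associate_current() has something to pin. A minimal illustrative caller follows (the example_* name is hypothetical; the two helpers are the ones shown in this patch):

/*
 * Illustrative only: mirrors the blk_throtl_bio() pattern above.  Under
 * GFP_ATOMIC the allocation is best effort, so %current->io_context may
 * still be NULL afterwards and callers must tolerate that.
 */
static bool example_associate_bio(struct request_queue *q, struct bio *bio)
{
	if (!create_io_context(GFP_ATOMIC, q->node))
		return false;

	bio_associate_current(bio);
	return true;
}
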
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c38536..673c977 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,7 +15,9 @@
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
-#include "cfq.h"
+#include "blk-cgroup.h"
+
+static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
/*
* tunables
@@ -171,8 +173,53 @@
SYNC_WORKLOAD = 2
};
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+ struct blkg_rwstat service_time;
+ /* total time spent waiting in scheduler queue in ns */
+ struct blkg_rwstat wait_time;
+ /* number of IOs queued up */
+ struct blkg_rwstat queued;
+ /* total sectors transferred */
+ struct blkg_stat sectors;
+ /* total disk time and nr sectors dispatched by this group */
+ struct blkg_stat time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* time not charged to this cgroup */
+ struct blkg_stat unaccounted_time;
+ /* sum of number of ios queued across all samples */
+ struct blkg_stat avg_queue_size_sum;
+ /* count of samples taken for average */
+ struct blkg_stat avg_queue_size_samples;
+ /* how many times this group has been removed from service tree */
+ struct blkg_stat dequeue;
+ /* total time spent waiting for it to be assigned a timeslice. */
+ struct blkg_stat group_wait_time;
+ /* time spent idling for this blkcg_gq */
+ struct blkg_stat idle_time;
+ /* total time with empty current active q with other requests queued */
+ struct blkg_stat empty_time;
+ /* fields after this shouldn't be cleared on stat reset */
+ uint64_t start_group_wait_time;
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+};
+
/* This is per cgroup per device grouping structure */
struct cfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
/* group service_tree member */
struct rb_node rb_node;
@@ -180,7 +227,7 @@
u64 vdisktime;
unsigned int weight;
unsigned int new_weight;
- bool needs_update;
+ unsigned int dev_weight;
/* number of cfqq currently on this group */
int nr_cfqq;
@@ -206,20 +253,21 @@
unsigned long saved_workload_slice;
enum wl_type_t saved_workload;
enum wl_prio_t saved_serving_prio;
- struct blkio_group blkg;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- struct hlist_node cfqd_node;
- int ref;
-#endif
+
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
struct cfq_ttime ttime;
+ struct cfqg_stats stats;
};
struct cfq_io_cq {
struct io_cq icq; /* must be the first member */
struct cfq_queue *cfqq[2];
struct cfq_ttime ttime;
+ int ioprio; /* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ uint64_t blkcg_id; /* the current blkcg ID */
+#endif
};
/*
@@ -229,7 +277,7 @@
struct request_queue *queue;
/* Root service tree for cfq_groups */
struct cfq_rb_root grp_service_tree;
- struct cfq_group root_group;
+ struct cfq_group *root_group;
/*
* The priority currently being served
@@ -303,12 +351,6 @@
struct cfq_queue oom_cfqq;
unsigned long last_delayed_sync;
-
- /* List of cfq groups being managed on this device*/
- struct hlist_head cfqg_list;
-
- /* Number of groups which are on blkcg->blkg_list */
- unsigned int nr_blkcg_linked_grps;
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -371,21 +413,284 @@
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+ return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+ return pd_to_blkg(&cfqg->pd);
+}
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+ CFQG_stats_waiting = 0,
+ CFQG_stats_idling,
+ CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name) \
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags |= (1 << CFQG_stats_##name); \
+} \
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags &= ~(1 << CFQG_stats_##name); \
+} \
+static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
+{ \
+ return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
+} \
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ blkg_stat_add(&stats->group_wait_time,
+ now - stats->start_group_wait_time);
+ cfqg_stats_clear_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_waiting(stats))
+ return;
+ if (cfqg == curr_cfqg)
+ return;
+ stats->start_group_wait_time = sched_clock();
+ cfqg_stats_mark_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ blkg_stat_add(&stats->empty_time,
+ now - stats->start_empty_time);
+ cfqg_stats_clear_empty(stats);
+}
+
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
+{
+ blkg_stat_add(&cfqg->stats.dequeue, 1);
+}
+
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (blkg_rwstat_sum(&stats->queued))
+ return;
+
+ /*
+ * group is already marked empty. This can happen if cfqq got new
+ * request in parent group and moved to this group while being added
+ * to service tree. Just ignore the event and move on.
+ */
+ if (cfqg_stats_empty(stats))
+ return;
+
+ stats->start_empty_time = sched_clock();
+ cfqg_stats_mark_empty(stats);
+}
+
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_idling(stats)) {
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, stats->start_idle_time))
+ blkg_stat_add(&stats->idle_time,
+ now - stats->start_idle_time);
+ cfqg_stats_clear_idling(stats);
+ }
+}
+
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ BUG_ON(cfqg_stats_idling(stats));
+
+ stats->start_idle_time = sched_clock();
+ cfqg_stats_mark_idling(stats);
+}
+
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ blkg_stat_add(&stats->avg_queue_size_sum,
+ blkg_rwstat_sum(&stats->queued));
+ blkg_stat_add(&stats->avg_queue_size_samples, 1);
+ cfqg_stats_update_group_wait_time(stats);
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+
+static inline void cfqg_get(struct cfq_group *cfqg)
+{
+ return blkg_get(cfqg_to_blkg(cfqg));
+}
+
+static inline void cfqg_put(struct cfq_group *cfqg)
+{
+ return blkg_put(cfqg_to_blkg(cfqg));
+}
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- blkg_path(&(cfqq)->cfqg->blkg), ##args)
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ __pbuf, ##args); \
+} while (0)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
- blkg_path(&(cfqg)->blkg), ##args) \
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
+} while (0)
-#else
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+ cfqg_stats_end_empty_time(&cfqg->stats);
+ cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
+}
+
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time)
+{
+ blkg_stat_add(&cfqg->stats.time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
+#endif
+}
+
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+}
+
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+}
+
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw)
+{
+ blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+ blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+ blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
+}
+
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, io_start_time))
+ blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+ if (time_after64(io_start_time, start_time))
+ blkg_rwstat_add(&stats->wait_time, rw,
+ io_start_time - start_time);
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_reset(&stats->service_bytes);
+ blkg_rwstat_reset(&stats->serviced);
+ blkg_rwstat_reset(&stats->merged);
+ blkg_rwstat_reset(&stats->service_time);
+ blkg_rwstat_reset(&stats->wait_time);
+ blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_reset(&stats->unaccounted_time);
+ blkg_stat_reset(&stats->avg_queue_size_sum);
+ blkg_stat_reset(&stats->avg_queue_size_samples);
+ blkg_stat_reset(&stats->dequeue);
+ blkg_stat_reset(&stats->group_wait_time);
+ blkg_stat_reset(&stats->idle_time);
+ blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
+
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
-#endif
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -466,8 +771,9 @@
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
- struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+ struct cfq_io_cq *cic, struct bio *bio,
+ gfp_t gfp_mask);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
@@ -545,7 +851,7 @@
{
u64 d = delta << CFQ_SERVICE_SHIFT;
- d = d * BLKIO_WEIGHT_DEFAULT;
+ d = d * CFQ_WEIGHT_DEFAULT;
do_div(d, cfqg->weight);
return d;
}
@@ -872,9 +1178,9 @@
cfq_update_group_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
- if (cfqg->needs_update) {
+ if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight;
- cfqg->needs_update = false;
+ cfqg->new_weight = 0;
}
}
@@ -936,7 +1242,7 @@
cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
cfq_group_service_tree_del(st, cfqg);
cfqg->saved_workload_slice = 0;
- cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+ cfqg_stats_update_dequeue(cfqg);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1008,178 +1314,59 @@
"sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
used_sl, cfqq->slice_dispatch, charge,
iops_mode(cfqd), cfqq->nr_sectors);
- cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
- unaccounted_sl);
- cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+ cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+ cfqg_stats_set_start_empty_time(cfqg);
}
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
- if (blkg)
- return container_of(blkg, struct cfq_group, blkg);
- return NULL;
-}
-
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
- unsigned int weight)
-{
- struct cfq_group *cfqg = cfqg_of_blkg(blkg);
- cfqg->new_weight = weight;
- cfqg->needs_update = true;
-}
-
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
- struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
-{
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
-
- /*
- * Add group onto cgroup list. It might happen that bdi->dev is
- * not initialized yet. Initialize this new group without major
- * and minor info and this info will be filled in once a new thread
- * comes for IO.
- */
- if (bdi->dev) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, MKDEV(major, minor));
- } else
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, 0);
-
- cfqd->nr_blkcg_linked_grps++;
- cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
-
- /* Add group on cfqd list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
-}
-
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
*/
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
- struct cfq_group *cfqg = NULL;
- int i, j, ret;
struct cfq_rb_root *st;
-
- cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
- if (!cfqg)
- return NULL;
+ int i, j;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
cfqg->ttime.last_end_request = jiffies;
-
- /*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * elevator which will be dropped by either elevator exit
- * or cgroup deletion path depending on who is exiting first.
- */
- cfqg->ref = 1;
-
- ret = blkio_alloc_blkg_stats(&cfqg->blkg);
- if (ret) {
- kfree(cfqg);
- return NULL;
- }
-
- return cfqg;
}
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfq_pd_init(struct blkcg_gq *blkg)
{
- struct cfq_group *cfqg = NULL;
- void *key = cfqd;
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- /*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- cfqg = &cfqd->root_group;
- else
- cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
-
- if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfqg->blkg.dev = MKDEV(major, minor);
- }
-
- return cfqg;
+ cfq_init_cfqg_base(cfqg);
+ cfqg->weight = blkg->blkcg->cfq_weight;
}
/*
* Search for the cfq group current task belongs to. request_queue lock must
* be held.
*/
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
{
- struct blkio_cgroup *blkcg;
- struct cfq_group *cfqg = NULL, *__cfqg = NULL;
struct request_queue *q = cfqd->queue;
+ struct cfq_group *cfqg = NULL;
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- cfqg = cfq_find_cfqg(cfqd, blkcg);
- if (cfqg) {
- rcu_read_unlock();
- return cfqg;
+ /* avoid lookup for the common case where there's no blkcg */
+ if (blkcg == &blkcg_root) {
+ cfqg = cfqd->root_group;
+ } else {
+ struct blkcg_gq *blkg;
+
+ blkg = blkg_lookup_create(blkcg, q);
+ if (!IS_ERR(blkg))
+ cfqg = blkg_to_cfqg(blkg);
}
- /*
- * Need to allocate a group. Allocation of group also needs allocation
- * of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- *
- * Not taking any queue reference here and assuming that queue is
- * around by the time we return. CFQ queue allocation code does
- * the same. It might be racy though.
- */
-
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
-
- cfqg = cfq_alloc_cfqg(cfqd);
-
- spin_lock_irq(q->queue_lock);
-
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
-
- /*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
- */
- __cfqg = cfq_find_cfqg(cfqd, blkcg);
-
- if (__cfqg) {
- kfree(cfqg);
- rcu_read_unlock();
- return __cfqg;
- }
-
- if (!cfqg)
- cfqg = &cfqd->root_group;
-
- cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
- rcu_read_unlock();
- return cfqg;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
- cfqg->ref++;
return cfqg;
}
@@ -1187,94 +1374,224 @@
{
/* Currently, all async queues are mapped to root group */
if (!cfq_cfqq_sync(cfqq))
- cfqg = &cfqq->cfqd->root_group;
+ cfqg = cfqq->cfqd->root_group;
cfqq->cfqg = cfqg;
/* cfqq reference on cfqg */
- cfqq->cfqg->ref++;
+ cfqg_get(cfqg);
}
-static void cfq_put_cfqg(struct cfq_group *cfqg)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct cfq_rb_root *st;
- int i, j;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
- BUG_ON(cfqg->ref <= 0);
- cfqg->ref--;
- if (cfqg->ref)
- return;
- for_each_cfqg_st(cfqg, i, j, st)
- BUG_ON(!RB_EMPTY_ROOT(&st->rb));
- free_percpu(cfqg->blkg.stats_cpu);
- kfree(cfqg);
+ if (!cfqg->dev_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
-
- hlist_del_init(&cfqg->cfqd_node);
-
- BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
- cfqd->nr_blkcg_linked_grps--;
-
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- cfq_put_cfqg(cfqg);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+ cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
+ false);
+ return 0;
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct hlist_node *pos, *n;
+ seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+ return 0;
+}
+
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkg_conf_ctx ctx;
struct cfq_group *cfqg;
+ int ret;
- hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
- cfq_destroy_cfqg(cfqd, cfqg);
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+ cfqg = blkg_to_cfqg(ctx.blkg);
+ if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+ cfqg->dev_weight = ctx.v;
+ cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+ ret = 0;
}
+
+ blkg_conf_finish(&ctx);
+ return ret;
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
- * read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deltion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
- unsigned long flags;
- struct cfq_data *cfqd = key;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg_gq *blkg;
+ struct hlist_node *n;
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
- cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+ if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+ return -EINVAL;
+
+ spin_lock_irq(&blkcg->lock);
+ blkcg->cfq_weight = (unsigned int)val;
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ if (cfqg && !cfqg->dev_weight)
+ cfqg->new_weight = blkcg->cfq_weight;
+ }
+
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
}
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
+ cft->private, false);
+ return 0;
+}
+
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
+ cft->private, true);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+ u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+ u64 v = 0;
+
+ if (samples) {
+ v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+ do_div(v, samples);
+ }
+ __blkg_prfill_u64(sf, pd, v);
+ return 0;
+}
+
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
+ &blkcg_policy_cfq, 0, false);
+ return 0;
+}
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+ {
+ .name = "weight_device",
+ .read_seq_string = cfqg_print_weight_device,
+ .write_string = cfqg_set_weight_device,
+ .max_write_len = 256,
+ },
+ {
+ .name = "weight",
+ .read_seq_string = cfq_print_weight,
+ .write_u64 = cfq_set_weight,
+ },
+ {
+ .name = "time",
+ .private = offsetof(struct cfq_group, stats.time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "sectors",
+ .private = offsetof(struct cfq_group, stats.sectors),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "io_service_bytes",
+ .private = offsetof(struct cfq_group, stats.service_bytes),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_serviced",
+ .private = offsetof(struct cfq_group, stats.serviced),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_service_time",
+ .private = offsetof(struct cfq_group, stats.service_time),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_wait_time",
+ .private = offsetof(struct cfq_group, stats.wait_time),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_merged",
+ .private = offsetof(struct cfq_group, stats.merged),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_queued",
+ .private = offsetof(struct cfq_group, stats.queued),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "avg_queue_size",
+ .read_seq_string = cfqg_print_avg_queue_size,
+ },
+ {
+ .name = "group_wait_time",
+ .private = offsetof(struct cfq_group, stats.group_wait_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "idle_time",
+ .private = offsetof(struct cfq_group, stats.idle_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "empty_time",
+ .private = offsetof(struct cfq_group, stats.empty_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "dequeue",
+ .private = offsetof(struct cfq_group, stats.dequeue),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "unaccounted_time",
+ .private = offsetof(struct cfq_group, stats.unaccounted_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+ { } /* terminate */
+};
#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
{
- return &cfqd->root_group;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
- return cfqg;
+ return cfqd->root_group;
}
static inline void
@@ -1282,9 +1599,6 @@
cfqq->cfqg = cfqg;
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
-
#endif /* GROUP_IOSCHED */
/*
@@ -1551,12 +1865,10 @@
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+ rq->cmd_flags);
}
static struct request *
@@ -1612,8 +1924,7 @@
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -1648,8 +1959,7 @@
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
- bio_data_dir(bio), cfq_bio_sync(bio));
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}
static void
@@ -1671,8 +1981,7 @@
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(next), rq_is_sync(next));
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -1713,7 +2022,7 @@
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
del_timer(&cfqd->idle_slice_timer);
- cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1722,7 +2031,7 @@
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
- cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_avg_queue_size(cfqq->cfqg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -2043,7 +2352,7 @@
* task has exited, don't wait
*/
cic = cfqd->active_cic;
- if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+ if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
return;
/*
@@ -2070,7 +2379,7 @@
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
- cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_set_start_idle_time(cfqq->cfqg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
group_idle ? 1 : 0);
}
@@ -2093,8 +2402,7 @@
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
- cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}
/*
@@ -2677,7 +2985,7 @@
BUG_ON(cfq_cfqq_on_rr(cfqq));
kmem_cache_free(cfq_pool, cfqq);
- cfq_put_cfqg(cfqg);
+ cfqg_put(cfqg);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -2736,7 +3044,7 @@
}
}
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
struct task_struct *tsk = current;
int ioprio_class;
@@ -2744,7 +3052,7 @@
if (!cfq_cfqq_prio_changed(cfqq))
return;
- ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2756,11 +3064,11 @@
cfqq->ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
@@ -2778,19 +3086,24 @@
cfq_clear_cfqq_prio_changed(cfqq);
}
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
+ int ioprio = cic->icq.ioc->ioprio;
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
- if (unlikely(!cfqd))
+ /*
+ * Check whether ioprio has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
return;
cfqq = cic->cfqq[BLK_RW_ASYNC];
if (cfqq) {
struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
- GFP_ATOMIC);
+ new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+ GFP_ATOMIC);
if (new_cfqq) {
cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
cfq_put_queue(cfqq);
@@ -2800,6 +3113,8 @@
cfqq = cic->cfqq[BLK_RW_SYNC];
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
+
+ cic->ioprio = ioprio;
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2823,17 +3138,24 @@
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
- struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct request_queue *q;
+ struct cfq_queue *sync_cfqq;
+ uint64_t id;
- if (unlikely(!cfqd))
+ rcu_read_lock();
+ id = bio_blkcg(bio)->id;
+ rcu_read_unlock();
+
+ /*
+ * Check whether blkcg has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
return;
- q = cfqd->queue;
-
+ sync_cfqq = cic_to_cfqq(cic, 1);
if (sync_cfqq) {
/*
* Drop reference to sync queue. A new sync queue will be
@@ -2843,21 +3165,26 @@
cic_set_cfqq(cic, NULL, 1);
cfq_put_queue(sync_cfqq);
}
+
+ cic->blkcg_id = id;
}
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
- struct io_context *ioc, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
+ struct blkcg *blkcg;
struct cfq_queue *cfqq, *new_cfqq = NULL;
- struct cfq_io_cq *cic;
struct cfq_group *cfqg;
retry:
- cfqg = cfq_get_cfqg(cfqd);
- cic = cfq_cic_lookup(cfqd, ioc);
- /* cic always exists here */
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+ cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
cfqq = cic_to_cfqq(cic, is_sync);
/*
@@ -2870,6 +3197,7 @@
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
+ rcu_read_unlock();
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
@@ -2885,7 +3213,7 @@
if (cfqq) {
cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, ioc);
+ cfq_init_prio_data(cfqq, cic);
cfq_link_cfqq_cfqg(cfqq, cfqg);
cfq_log_cfqq(cfqd, cfqq, "alloced");
} else
@@ -2895,6 +3223,7 @@
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
+ rcu_read_unlock();
return cfqq;
}
@@ -2904,6 +3233,9 @@
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_NONE:
+ ioprio = IOPRIO_NORM;
+ /* fall through */
case IOPRIO_CLASS_BE:
return &cfqd->async_cfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
@@ -2914,11 +3246,11 @@
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
- gfp_t gfp_mask)
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio = task_ioprio(ioc);
- const int ioprio_class = task_ioprio_class(ioc);
+ const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
@@ -2928,7 +3260,7 @@
}
if (!cfqq)
- cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -3010,7 +3342,7 @@
if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
enable_idle = 0;
- else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+ else if (!atomic_read(&cic->icq.ioc->active_ref) ||
!cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
@@ -3174,8 +3506,7 @@
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else {
- cfq_blkiocg_update_idle_time_stats(
- &cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
cfq_mark_cfqq_must_dispatch(cfqq);
}
}
@@ -3197,14 +3528,13 @@
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request");
- cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+ cfq_init_prio_data(cfqq, RQ_CIC(rq));
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+ rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -3300,9 +3630,8 @@
cfqd->rq_in_driver--;
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
- cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
- rq_start_time_ns(rq), rq_io_start_time_ns(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -3399,7 +3728,7 @@
cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
if (cfqq) {
- cfq_init_prio_data(cfqq, cic->icq.ioc);
+ cfq_init_prio_data(cfqq, cic);
return __cfq_may_queue(cfqq);
}
@@ -3421,7 +3750,7 @@
cfqq->allocated[rw]--;
/* Put down rq reference on cfqg */
- cfq_put_cfqg(RQ_CFQG(rq));
+ cfqg_put(RQ_CFQG(rq));
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
@@ -3465,32 +3794,25 @@
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+ gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
const int rw = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
- unsigned int changed;
might_sleep_if(gfp_mask & __GFP_WAIT);
spin_lock_irq(q->queue_lock);
- /* handle changed notifications */
- changed = icq_get_changed(&cic->icq);
- if (unlikely(changed & ICQ_IOPRIO_CHANGED))
- changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (unlikely(changed & ICQ_CGROUP_CHANGED))
- changed_cgroup(cic);
-#endif
-
+ check_ioprio_changed(cic, bio);
+ check_blkcg_changed(cic, bio);
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+ cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync);
} else {
/*
@@ -3516,8 +3838,9 @@
cfqq->allocated[rw]++;
cfqq->ref++;
+ cfqg_get(cfqq->cfqg);
rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+ rq->elv.priv[1] = cfqq->cfqg;
spin_unlock_irq(q->queue_lock);
return 0;
}
@@ -3614,7 +3937,6 @@
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
- bool wait = false;
cfq_shutdown_timer_wq(cfqd);
@@ -3624,89 +3946,52 @@
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
cfq_put_async_queues(cfqd);
- cfq_release_cfq_groups(cfqd);
-
- /*
- * If there are groups which we could not unlink from blkcg list,
- * wait for a rcu period for them to be freed.
- */
- if (cfqd->nr_blkcg_linked_grps)
- wait = true;
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
- /*
- * Wait for cfqg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other unlinked groups out
- * there. This can happen if cgroup deletion path claimed the
- * responsibility of cleaning up a group before queue cleanup code
- * get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* Free up per cpu stats for root group */
- free_percpu(cfqd->root_group.blkg.stats_cpu);
+#ifndef CONFIG_CFQ_GROUP_IOSCHED
+ kfree(cfqd->root_group);
#endif
+ blkcg_deactivate_policy(q, &blkcg_policy_cfq);
kfree(cfqd);
}
-static void *cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
- int i, j;
- struct cfq_group *cfqg;
- struct cfq_rb_root *st;
+ struct blkcg_gq *blkg __maybe_unused;
+ int i, ret;
cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
- return NULL;
+ return -ENOMEM;
+
+ cfqd->queue = q;
+ q->elevator->elevator_data = cfqd;
/* Init root service tree */
cfqd->grp_service_tree = CFQ_RB_ROOT;
- /* Init root group */
- cfqg = &cfqd->root_group;
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
-
- /* Give preference to root group over other groups */
- cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
-
+ /* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /*
- * Set root group reference to 2. One reference will be dropped when
- * all groups on cfqd->cfqg_list are being deleted during queue exit.
- * Other reference will remain there as we don't want to delete this
- * group as it is statically allocated and gets destroyed when
- * throtl_data goes away.
- */
- cfqg->ref = 2;
+ ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+ if (ret)
+ goto out_free;
- if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
- kfree(cfqg);
- kfree(cfqd);
- return NULL;
- }
+ cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+ ret = -ENOMEM;
+ cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+ GFP_KERNEL, cfqd->queue->node);
+ if (!cfqd->root_group)
+ goto out_free;
- rcu_read_lock();
-
- cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
- (void *)cfqd, 0);
- rcu_read_unlock();
- cfqd->nr_blkcg_linked_grps++;
-
- /* Add group on cfqd->cfqg_list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+ cfq_init_cfqg_base(cfqd->root_group);
#endif
+ cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+
/*
* Not strictly needed (since RB_ROOT just clears the node and we
* zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3718,13 +4003,17 @@
/*
* Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
- * will not attempt to free it.
+ * will not attempt to free it. oom_cfqq is linked to root_group
+ * but shouldn't hold a reference as it'll never be unlinked. Lose
+ * the reference from linking right away.
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
cfqd->oom_cfqq.ref++;
- cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
- cfqd->queue = q;
+ spin_lock_irq(q->queue_lock);
+ cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+ cfqg_put(cfqd->root_group);
+ spin_unlock_irq(q->queue_lock);
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
@@ -3750,7 +4039,11 @@
* second, in order to have larger depth for async operations.
*/
cfqd->last_delayed_sync = jiffies - HZ;
- return cfqd;
+ return 0;
+
+out_free:
+ kfree(cfqd);
+ return ret;
}
/*
@@ -3877,15 +4170,13 @@
};
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkio_policy_type blkio_policy_cfq = {
- .ops = {
- .blkio_unlink_group_fn = cfq_unlink_blkio_group,
- .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
- },
- .plid = BLKIO_POLICY_PROP,
+static struct blkcg_policy blkcg_policy_cfq = {
+ .pd_size = sizeof(struct cfq_group),
+ .cftypes = cfq_blkcg_files,
+
+ .pd_init_fn = cfq_pd_init,
+ .pd_reset_stats_fn = cfq_pd_reset_stats,
};
-#else
-static struct blkio_policy_type blkio_policy_cfq;
#endif
static int __init cfq_init(void)
@@ -3906,24 +4197,31 @@
#else
cfq_group_idle = 0;
#endif
+
+ ret = blkcg_policy_register(&blkcg_policy_cfq);
+ if (ret)
+ return ret;
+
cfq_pool = KMEM_CACHE(cfq_queue, 0);
if (!cfq_pool)
- return -ENOMEM;
+ goto err_pol_unreg;
ret = elv_register(&iosched_cfq);
- if (ret) {
- kmem_cache_destroy(cfq_pool);
- return ret;
- }
-
- blkio_policy_register(&blkio_policy_cfq);
+ if (ret)
+ goto err_free_pool;
return 0;
+
+err_free_pool:
+ kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+ blkcg_policy_unregister(&blkcg_policy_cfq);
+ return ret;
}
static void __exit cfq_exit(void)
{
- blkio_policy_unregister(&blkio_policy_cfq);
+ blkcg_policy_unregister(&blkcg_policy_cfq);
elv_unregister(&iosched_cfq);
kmem_cache_destroy(cfq_pool);
}
diff --git a/block/cfq.h b/block/cfq.h
deleted file mode 100644
index 2a15592..0000000
--- a/block/cfq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _CFQ_H
-#define _CFQ_H
-#include "blk-cgroup.h"
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync)
-{
- blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkiocg_update_dequeue_stats(blkg, dequeue);
-}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time)
-{
- blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
-}
-
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- blkiocg_set_start_empty_time(blkg);
-}
-
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_remove_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_merged_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_idle_time_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- blkiocg_update_avg_queue_size_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_set_idle_time_stats(blkg);
-}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- blkiocg_update_completion_stats(blkg, start_time, io_start_time,
- direction, sync);
-}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {
- blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
-}
-
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return blkiocg_del_blkio_group(blkg);
-}
-
-#else /* CFQ_GROUP_IOSCHED */
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {}
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return 0;
-}
-
-#endif /* CFQ_GROUP_IOSCHED */
-#endif
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 7bf12d7..599b12e 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -337,13 +337,13 @@
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
- return NULL;
+ return -ENOMEM;
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- return dd;
+
+ q->elevator->elevator_data = dd;
+ return 0;
}
/*
diff --git a/block/elevator.c b/block/elevator.c
index f016855..6a55d41 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -38,6 +38,7 @@
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -121,15 +122,6 @@
return e;
}
-static int elevator_init_queue(struct request_queue *q,
- struct elevator_queue *eq)
-{
- eq->elevator_data = eq->type->ops.elevator_init_fn(q);
- if (eq->elevator_data)
- return 0;
- return -ENOMEM;
-}
-
static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
@@ -188,7 +180,6 @@
int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
- struct elevator_queue *eq;
int err;
if (unlikely(q->elevator))
@@ -222,17 +213,16 @@
}
}
- eq = elevator_alloc(q, e);
- if (!eq)
+ q->elevator = elevator_alloc(q, e);
+ if (!q->elevator)
return -ENOMEM;
- err = elevator_init_queue(q, eq);
+ err = e->ops.elevator_init_fn(q);
if (err) {
- kobject_put(&eq->kobj);
+ kobject_put(&q->elevator->kobj);
return err;
}
- q->elevator = eq;
return 0;
}
EXPORT_SYMBOL(elevator_init);
@@ -564,25 +554,6 @@
}
}
-void elv_quiesce_start(struct request_queue *q)
-{
- if (!q->elevator)
- return;
-
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-}
-
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
@@ -692,12 +663,13 @@
return NULL;
}
-int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_set_req_fn)
- return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+ return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
return 0;
}
@@ -801,8 +773,9 @@
.release = elevator_release,
};
-int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
{
+ struct elevator_queue *e = q->elevator;
int error;
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -820,11 +793,6 @@
}
return error;
}
-
-int elv_register_queue(struct request_queue *q)
-{
- return __elv_register_queue(q, q->elevator);
-}
EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
@@ -907,53 +875,60 @@
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- struct elevator_queue *old_elevator, *e;
+ struct elevator_queue *old = q->elevator;
+ bool registered = old->registered;
int err;
- /* allocate new elevator */
- e = elevator_alloc(q, new_e);
- if (!e)
- return -ENOMEM;
+ /*
+ * Turn on BYPASS and drain all requests w/ elevator private data.
+ * Block layer doesn't call into a quiesced elevator - all requests
+ * are directly put on the dispatch list without elevator data
+ * using INSERT_BACK. All requests have SOFTBARRIER set and no
+ * merge happens either.
+ */
+ blk_queue_bypass_start(q);
- err = elevator_init_queue(q, e);
+ /* unregister and clear all auxiliary data of the old elevator */
+ if (registered)
+ elv_unregister_queue(q);
+
+ spin_lock_irq(q->queue_lock);
+ ioc_clear_queue(q);
+ spin_unlock_irq(q->queue_lock);
+
+ /* allocate, init and register new elevator */
+ err = -ENOMEM;
+ q->elevator = elevator_alloc(q, new_e);
+ if (!q->elevator)
+ goto fail_init;
+
+ err = new_e->ops.elevator_init_fn(q);
if (err) {
- kobject_put(&e->kobj);
- return err;
+ kobject_put(&q->elevator->kobj);
+ goto fail_init;
}
- /* turn on BYPASS and drain all requests w/ elevator private data */
- elv_quiesce_start(q);
-
- /* unregister old queue, register new one and kill old elevator */
- if (q->elevator->registered) {
- elv_unregister_queue(q);
- err = __elv_register_queue(q, e);
+ if (registered) {
+ err = elv_register_queue(q);
if (err)
goto fail_register;
}
- /* done, clear io_cq's, switch elevators and turn off BYPASS */
- spin_lock_irq(q->queue_lock);
- ioc_clear_queue(q);
- old_elevator = q->elevator;
- q->elevator = e;
- spin_unlock_irq(q->queue_lock);
+ /* done, kill the old one and finish */
+ elevator_exit(old);
+ blk_queue_bypass_end(q);
- elevator_exit(old_elevator);
- elv_quiesce_end(q);
-
- blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
- /*
- * switch failed, exit the new io scheduler and reattach the old
- * one again (along with re-adding the sysfs dir)
- */
- elevator_exit(e);
+ elevator_exit(q->elevator);
+fail_init:
+ /* switch failed, restore and re-register old elevator */
+ q->elevator = old;
elv_register_queue(q);
- elv_quiesce_end(q);
+ blk_queue_bypass_end(q);
return err;
}
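
For orientation, a hedged sketch of the bypass window that replaces the old elv_quiesce_start()/elv_quiesce_end() pair in the switch path above; the elided steps are summarised in comments and are not part of the patch:

	/* Illustrative sketch only. While a queue is in bypass mode, new
	 * requests skip the elevator and go straight to the dispatch list,
	 * so the old elevator can be torn down and the new one built up
	 * without either of them seeing traffic. */
	blk_queue_bypass_start(q);	/* also drains requests w/ elevator data */
	/* ... unregister the old elevator, ioc_clear_queue(q), allocate and
	 * initialize the new one, re-register it ... */
	blk_queue_bypass_end(q);	/* normal elevator-driven dispatch resumes */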
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 413a0b1..5d1bf70 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,15 +59,17 @@
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
if (!nd)
- return NULL;
+ return -ENOMEM;
+
INIT_LIST_HEAD(&nd->queue);
- return nd;
+ q->elevator->elevator_data = nd;
+ return 0;
}
static void noop_exit_queue(struct elevator_queue *e)
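
The deadline and noop conversions follow the same contract change as cfq above: an elevator_init_fn now returns an errno and publishes its private data through q->elevator->elevator_data itself, instead of handing a pointer back to elevator_init(). A minimal sketch of that contract for a hypothetical scheduler (the "foo" names are illustrative only):

	static int foo_init_queue(struct request_queue *q)
	{
		struct foo_data *fd;	/* hypothetical per-queue scheduler data */

		fd = kmalloc_node(sizeof(*fd), GFP_KERNEL | __GFP_ZERO, q->node);
		if (!fd)
			return -ENOMEM;	/* failure is now an errno, not a NULL return */

		/* the scheduler installs its own private data; elevator_init()
		 * no longer assigns eq->elevator_data from a returned pointer */
		q->elevator->elevator_data = fd;
		return 0;
	}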
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652..9851093 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@
} else if (skb && card->using_dma) {
SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
}
@@ -1152,7 +1153,8 @@
db_fpga_upgrade = db_firmware_upgrade = 0;
}
- if (card->fpga_version >= DMA_SUPPORTED){
+ if (card->fpga_version >= DMA_SUPPORTED) {
+ pci_set_master(dev);
card->using_dma = 1;
} else {
card->using_dma = 0;
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 5f6b247..fa6bf52 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -42,7 +42,7 @@
/* If the I2C controller can't do a gather, tell the core; it
* will substitute in a linear write for us.
*/
- if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
return -ENOTSUPP;
xfer[0].addr = i2c->addr;
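
For context, a hedged sketch of the gather-write path this check guards; the buffers and lengths are placeholders, but the flag usage follows the I2C_FUNC_NOSTART / I2C_M_NOSTART split this series introduces:

	/* Illustrative only: write register address and payload as two
	 * messages, suppressing the repeated start on the second so the
	 * device sees one linear write. */
	struct i2c_msg xfer[2] = {
		{ .addr = i2c->addr, .flags = 0,             .len = reg_len, .buf = reg_buf },
		{ .addr = i2c->addr, .flags = I2C_M_NOSTART, .len = val_len, .buf = val_buf },
	};

	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
		return -ENOTSUPP;	/* adapter cannot skip the repeated start */

	return i2c_transfer(i2c->adapter, xfer, 2) == 2 ? 0 : -EIO;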
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index cf0e63d..e54e31b 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -65,39 +65,80 @@
int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+ int r;
+
+ wait_event(mdev->misc_wait,
+ (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+ mdev->state.disk <= D_FAILED);
+
+ return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+ if (atomic_dec_and_test(&mdev->md_io_in_use))
+ wake_up(&mdev->misc_wait);
+}
+
+static bool md_io_allowed(struct drbd_conf *mdev)
+{
+ enum drbd_disk_state ds = mdev->state.disk;
+ return ds >= D_NEGOTIATING || ds == D_ATTACHING;
+}
+
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done)
+{
+ long dt = bdev->dc.disk_timeout * HZ / 10;
+ if (dt == 0)
+ dt = MAX_SCHEDULE_TIMEOUT;
+
+ dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+ if (dt == 0)
+ dev_err(DEV, "meta-data IO operation timed out\n");
+}
+
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
{
struct bio *bio;
- struct drbd_md_io md_io;
int ok;
- md_io.mdev = mdev;
- init_completion(&md_io.event);
- md_io.error = 0;
+ mdev->md_io.done = 0;
+ mdev->md_io.error = -ENODEV;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size);
if (!ok)
goto out;
- bio->bi_private = &md_io;
+ bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+ ok = 0;
+ goto out;
+ }
+
+ bio_get(bio); /* one bio_put() is in the completion handler */
+ atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_for_completion(&md_io.event);
- ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+ wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+ ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
out:
bio_put(bio);
@@ -111,7 +152,7 @@
int offset = 0;
struct page *iop = mdev->md_io_page;
- D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+ D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
@@ -328,8 +369,13 @@
return 1;
}
- mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
- buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+ if (!buffer) {
+ dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+ complete(&((struct update_al_work *)w)->event);
+ put_ldev(mdev);
+ return 1;
+ }
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -374,7 +420,7 @@
D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
mdev->al_tr_number++;
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
@@ -443,8 +489,9 @@
/* lock out all other meta data io for now,
* and make sure the page is mapped.
*/
- mutex_lock(&mdev->md_io_mutex);
- buffer = page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ return 0;
/* Find the valid transaction in the log */
for (i = 0; i <= mx; i++) {
@@ -452,7 +499,7 @@
if (rv == 0)
continue;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
cnr = be32_to_cpu(buffer->tr_number);
@@ -478,7 +525,7 @@
if (!found_valid) {
dev_warn(DEV, "No usable activity log found.\n");
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 1;
}
@@ -493,7 +540,7 @@
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
ERR_IF(rv == 0) goto cancel;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
@@ -534,7 +581,7 @@
mdev->al_tr_pos = 0;
/* ok, we are done with it */
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
transactions, active_extents);
@@ -671,16 +718,20 @@
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
- dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
- "rs_failed=%d count=%d\n",
+ dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+ "rs_failed=%d count=%d cstate=%s\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
- ext->rs_failed, count);
- dump_stack();
+ ext->rs_failed, count,
+ drbd_conn_str(mdev->state.conn));
- lc_put(mdev->resync, &ext->lce);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
+ /* We don't expect to be able to clear more bits
+ * than have been set when we originally counted
+ * the set bits to cache that value in ext->rs_left.
+ * Whatever the reason (disconnect during resync,
+ * delayed local completion of an application write),
+ * try to fix it up by recounting here. */
+ ext->rs_left = drbd_bm_e_weight(mdev, enr);
}
} else {
/* Normally this element should be in the cache,
@@ -1192,6 +1243,7 @@
put_ldev(mdev);
}
spin_unlock_irq(&mdev->al_lock);
+ wake_up(&mdev->al_wait);
return 0;
}
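
The mutex that used to guard the shared meta-data page is gone; exclusivity now comes from the md_io_in_use 0 -> 1 cmpxchg inside drbd_md_get_buffer(), with waiters parked on misc_wait. A reduced, illustrative sketch of a caller of that scheme (not taken verbatim from the patch):

	/* Sketch only: exclusive use of the one md_io page, without a mutex. */
	static void md_io_example(struct drbd_conf *mdev)
	{
		void *buffer;

		/* blocks until md_io_in_use goes 0 -> 1 for us, or the disk fails */
		buffer = drbd_md_get_buffer(mdev);
		if (!buffer)
			return;		/* disk failed while waiting; nothing to do */

		/* ... fill the page, submit meta-data IO, wait for md_io.done ... */

		drbd_md_put_buffer(mdev);	/* md_io_in_use back to 0, wakes waiters */
	}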
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3030201..b5c5ff5 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -205,7 +205,7 @@
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
- page_private(page) |= idx;
+ set_page_private(page, idx);
}
static unsigned long bm_page_to_idx(struct page *page)
@@ -886,12 +886,21 @@
struct bm_aio_ctx {
struct drbd_conf *mdev;
atomic_t in_flight;
- struct completion done;
+ unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
int error;
+ struct kref kref;
};
+static void bm_aio_ctx_destroy(struct kref *kref)
+{
+ struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
+
+ put_ldev(ctx->mdev);
+ kfree(ctx);
+}
+
/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
@@ -930,20 +939,21 @@
bm_page_unlock_io(mdev, idx);
- /* FIXME give back to page pool */
if (ctx->flags & BM_AIO_COPY_PAGES)
- put_page(bio->bi_io_vec[0].bv_page);
+ mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
bio_put(bio);
- if (atomic_dec_and_test(&ctx->in_flight))
- complete(&ctx->done);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&mdev->misc_wait);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ }
}
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
- /* we are process context. we always get a bio */
- struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct drbd_conf *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct page *page;
@@ -966,10 +976,8 @@
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- /* FIXME alloc_page is good enough for now, but actually needs
- * to use pre-allocated page pool */
void *src, *dest;
- page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+ page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
dest = kmap_atomic(page);
src = kmap_atomic(b->bm_pages[page_nr]);
memcpy(dest, src, PAGE_SIZE);
@@ -981,6 +989,8 @@
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
+ /* bio_add_page of a single page to an empty bio will always succeed,
+ * according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
@@ -999,14 +1009,9 @@
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
- .mdev = mdev,
- .in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
- .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
- };
+ struct bm_aio_ctx *ctx;
struct drbd_bitmap *b = mdev->bitmap;
int num_pages, i, count = 0;
unsigned long now;
@@ -1021,7 +1026,27 @@
* For lazy writeout, we don't care for ongoing changes to the bitmap,
* as we submit copies of pages anyways.
*/
- if (!ctx.flags)
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = 0,
+ .flags = flags,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
+ };
+
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+ kfree(ctx);
+ return -ENODEV;
+ }
+
+ if (!ctx->flags)
WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
num_pages = b->bm_number_of_pages;
@@ -1046,29 +1071,38 @@
continue;
}
}
- atomic_inc(&ctx.in_flight);
- bm_page_io_async(&ctx, i, rw);
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i, rw);
++count;
cond_resched();
}
/*
- * We initialize ctx.in_flight to one to make sure bm_async_io_complete
- * will not complete() early, and decrement / test it here. If there
+ * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+ * will not set ctx->done early, and decrement / test it here. If there
* are still some bios in flight, we need to wait for them here.
+ * If all IO is done already (or nothing had been submitted), there is
+ * no need to wait. Still, we need to put the kref associated with the
+ * "in_flight reached zero, all done" event.
*/
- if (!atomic_dec_and_test(&ctx.in_flight))
- wait_for_completion(&ctx.done);
+ if (!atomic_dec_and_test(&ctx->in_flight))
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ else
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+
dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
rw == WRITE ? "WRITE" : "READ",
count, jiffies - now);
- if (ctx.error) {
+ if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(mdev, 1, true);
- err = -EIO; /* ctx.error ? */
+ err = -EIO; /* ctx->error ? */
}
+ if (atomic_read(&ctx->in_flight))
+ err = -EIO; /* Disk failed during IO... */
+
now = jiffies;
if (rw == WRITE) {
drbd_md_flush(mdev);
@@ -1082,6 +1116,7 @@
dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
}
@@ -1091,7 +1126,7 @@
*/
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, READ, 0);
+ return bm_rw(mdev, READ, 0, 0);
}
/**
@@ -1102,7 +1137,7 @@
*/
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, WRITE, 0);
+ return bm_rw(mdev, WRITE, 0, 0);
}
/**
@@ -1112,7 +1147,23 @@
*/
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
- return bm_rw(mdev, WRITE, upper_idx);
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+}
+
+/**
+ * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
+ * @mdev: DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
+ * In contrast to drbd_bm_write(), this will copy the bitmap pages
+ * to temporary writeout pages. It is intended to trigger a full write-out
+ * while still allowing the bitmap to change, for example if a resync or online
+ * verify is aborted due to a failed peer disk, while local IO continues, or
+ * pending resync acks are still being processed.
+ */
+int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
@@ -1130,28 +1181,45 @@
*/
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
- .mdev = mdev,
- .in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
- .flags = BM_AIO_COPY_PAGES,
- };
+ struct bm_aio_ctx *ctx;
+ int err;
if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
return 0;
}
- bm_page_io_async(&ctx, idx, WRITE_SYNC);
- wait_for_completion(&ctx.done);
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
- if (ctx.error)
+ *ctx = (struct bm_aio_ctx) {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = 0,
+ .flags = BM_AIO_COPY_PAGES,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
+ };
+
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+ kfree(ctx);
+ return -ENODEV;
+ }
+
+ bm_page_io_async(ctx, idx, WRITE_SYNC);
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+
+ if (ctx->error)
drbd_chk_io_error(mdev, 1, true);
/* that should force detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
- return ctx.error;
+ err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ return err;
}
/* NOTE
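
The on-stack completion in bm_aio_ctx is replaced by a heap-allocated context whose lifetime is shared between the submitter and the completion path through a kref, so a failing disk can abandon the wait without a use-after-free. A reduced sketch of that ownership scheme (illustrative; the real code initializes the kref to 2 directly):

	struct bm_aio_ctx *ctx = kmalloc(sizeof(*ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	atomic_set(&ctx->in_flight, 1);	/* submitter's bias on in_flight */
	kref_init(&ctx->kref);		/* ref 1: the submitter */
	kref_get(&ctx->kref);		/* ref 2: "in_flight reached zero" event */

	/* ... one atomic_inc(&ctx->in_flight) and one bio per page submitted;
	 * the completion handler drops the second ref when in_flight hits 0 ... */

	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);	/* nothing was in flight */

	/* evaluate ctx->error / ctx->done here, then drop the submitter's ref */
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);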
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8d68056..02f013a 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -712,7 +712,6 @@
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
unsigned long rq_state; /* see comments above _req_mod() */
- int seq_num;
unsigned long start_time;
};
@@ -851,6 +850,7 @@
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@
BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
/* currently locked for bulk operation */
- BM_LOCKED_MASK = 0x7,
+ BM_LOCKED_MASK = 0xf,
/* in detail, that is: */
BM_DONT_CLEAR = 0x1,
BM_DONT_SET = 0x2,
BM_DONT_TEST = 0x4,
+ /* so we can mark it locked for bulk operation,
+ * and still allow all non-bulk operations */
+ BM_IS_LOCKED = 0x8,
+
/* (test bit, count bit) allowed (common case) */
- BM_LOCKED_TEST_ALLOWED = 0x3,
+ BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
/* testing bits, as well as setting new bits allowed, but clearing bits
* would be unexpected. Used during bitmap receive. Setting new bits
* requires sending of "out-of-sync" information, though. */
- BM_LOCKED_SET_ALLOWED = 0x1,
+ BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
- /* clear is not expected while bitmap is locked for bulk operation */
+ /* for drbd_bm_write_copy_pages, everything is allowed,
+ * only concurrent bulk operations are locked out. */
+ BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
struct drbd_work_queue {
struct list_head q;
struct semaphore s; /* producers up it, worker down()s it */
@@ -938,8 +937,7 @@
};
struct drbd_md_io {
- struct drbd_conf *mdev;
- struct completion event;
+ unsigned int done;
int error;
};
@@ -1022,6 +1020,7 @@
struct drbd_tl_epoch *newest_tle;
struct drbd_tl_epoch *oldest_tle;
struct list_head out_of_sequence_requests;
+ struct list_head barrier_acked_requests;
struct hlist_head *tl_hash;
unsigned int tl_hash_s;
@@ -1056,6 +1055,8 @@
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
+ unsigned long last_reattach_jif;
+ unsigned long last_reconnect_jif;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
@@ -1094,7 +1095,8 @@
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
struct page *md_io_tmpp; /* for logical_block_size != 512 */
- struct mutex md_io_mutex; /* protects the md_io_buffer */
+ struct drbd_md_io md_io;
+ atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
wait_queue_head_t al_wait;
struct lru_cache *act_log; /* activity log */
@@ -1228,8 +1230,8 @@
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
enum drbd_packets cmd, struct p_header80 *h,
size_t size, unsigned msg_flags);
@@ -1461,6 +1463,7 @@
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, it first takes pages from the pool.
+ * Only if the pool is depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty; currently we can probably get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES 128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
extern rwlock_t global_state_lock;
extern struct drbd_conf *drbd_new_device(unsigned int minor);
@@ -1536,8 +1566,12 @@
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);
/* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
@@ -1754,19 +1788,6 @@
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
{
struct page *page = e->pages;
@@ -1777,7 +1798,6 @@
return 0;
}
-
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
- unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+ unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
unsigned long total = 1UL + (mdev->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
@@ -2306,12 +2326,12 @@
case D_OUTDATED:
case D_CONSISTENT:
case D_UP_TO_DATE:
+ case D_FAILED:
/* disk state is stable as well. */
break;
/* no new io accepted during transitional states */
case D_ATTACHING:
- case D_FAILED:
case D_NEGOTIATING:
case D_UNKNOWN:
case D_MASK:
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 211fc44..920ede2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -139,6 +139,8 @@
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
+struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
@@ -159,7 +161,24 @@
.release = drbd_release,
};
-#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+static void bio_destructor_drbd(struct bio *bio)
+{
+ bio_free(bio, drbd_md_io_bio_set);
+}
+
+struct bio *bio_alloc_drbd(gfp_t gfp_mask)
+{
+ struct bio *bio;
+
+ if (!drbd_md_io_bio_set)
+ return bio_alloc(gfp_mask, 1);
+
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ bio->bi_destructor = bio_destructor_drbd;
+ return bio;
+}
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
@@ -208,6 +227,7 @@
mdev->oldest_tle = b;
mdev->newest_tle = b;
INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+ INIT_LIST_HEAD(&mdev->barrier_acked_requests);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
@@ -246,9 +266,7 @@
new->n_writes = 0;
newest_before = mdev->newest_tle;
- /* never send a barrier number == 0, because that is special-cased
- * when using TCQ for our write ordering code */
- new->br_number = (newest_before->br_number+1) ?: 1;
+ new->br_number = newest_before->br_number+1;
if (mdev->newest_tle != new) {
mdev->newest_tle->next = new;
mdev->newest_tle = new;
@@ -311,7 +329,7 @@
These have been list_move'd to the out_of_sequence_requests list in
_req_mod(, barrier_acked) above.
*/
- list_del_init(&b->requests);
+ list_splice_init(&b->requests, &mdev->barrier_acked_requests);
nob = b->next;
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
@@ -411,6 +429,23 @@
b = tmp;
list_splice(&carry_reads, &b->requests);
}
+
+ /* Actions operating on the disk state, also want to work on
+ requests that got barrier acked. */
+ switch (what) {
+ case fail_frozen_disk_io:
+ case restart_frozen_disk_io:
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ _req_mod(req, what);
+ }
+
+ case connection_lost_while_pending:
+ case resend:
+ break;
+ default:
+ dev_err(DEV, "what = %d in _tl_restart()\n", what);
+ }
}
@@ -458,6 +493,38 @@
}
/**
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
+ * @mdev: DRBD device.
+ */
+void tl_abort_disk_io(struct drbd_conf *mdev)
+{
+ struct drbd_tl_epoch *b;
+ struct list_head *le, *tle;
+ struct drbd_request *req;
+
+ spin_lock_irq(&mdev->req_lock);
+ b = mdev->oldest_tle;
+ while (b) {
+ list_for_each_safe(le, tle, &b->requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+ b = b->next;
+ }
+
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+
+ spin_unlock_irq(&mdev->req_lock);
+}
+
+/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
* @mdev: DRBD device.
* @os: old (current) state.
@@ -470,7 +537,7 @@
((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
(os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
@@ -509,8 +576,16 @@
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
union drbd_state,
union drbd_state);
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort);
+ union drbd_state ns, enum sanitize_state_warnings *warn);
int drbd_send_state_req(struct drbd_conf *,
union drbd_state, union drbd_state);
@@ -785,6 +860,13 @@
if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
rv = SS_IN_TRANSIENT_STATE;
+ /* While establishing a connection only allow cstate to change.
+ Delay/refuse role changes, detach attach etc... */
+ if (test_bit(STATE_SENT, &mdev->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
@@ -803,6 +885,21 @@
return rv;
}
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
* @mdev: DRBD device.
@@ -814,11 +911,14 @@
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort)
+ union drbd_state ns, enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+ if (warn)
+ *warn = NO_WARNING;
+
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
fp = mdev->ldev->dc.fencing;
@@ -833,18 +933,13 @@
/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
* If you try to go into some Sync* state, that shall fail (elsewhere). */
if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
+ ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
ns.conn = os.conn;
/* we cannot fail (again) if we already detached */
if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
ns.disk = D_DISKLESS;
- /* if we are only D_ATTACHING yet,
- * we can (and should) go directly to D_DISKLESS. */
- if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
- ns.disk = D_DISKLESS;
-
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
@@ -863,10 +958,9 @@
/* Abort resync if a disk fails/detaches */
if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
(ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn_sync_abort)
- *warn_sync_abort =
- os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- "Online-verify" : "Resync";
+ if (warn)
+ *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
ns.conn = C_CONNECTED;
}
@@ -877,7 +971,8 @@
ns.disk = mdev->new_state_tmp.disk;
ns.pdsk = mdev->new_state_tmp.pdsk;
} else {
- dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
@@ -959,16 +1054,16 @@
ns.disk = disk_max;
if (ns.disk < disk_min) {
- dev_warn(DEV, "Implicitly set disk from %s to %s\n",
- drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
ns.disk = disk_min;
}
if (ns.pdsk > pdsk_max)
ns.pdsk = pdsk_max;
if (ns.pdsk < pdsk_min) {
- dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
- drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
ns.pdsk = pdsk_min;
}
@@ -1045,12 +1140,12 @@
{
union drbd_state os;
enum drbd_state_rv rv = SS_SUCCESS;
- const char *warn_sync_abort = NULL;
+ enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
os = mdev->state;
- ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+ ns = sanitize_state(mdev, os, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -1076,8 +1171,7 @@
return rv;
}
- if (warn_sync_abort)
- dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
+ print_sanitize_warnings(mdev, ssw);
{
char *pbp, pb[300];
@@ -1243,7 +1337,7 @@
drbd_thread_stop_nowait(&mdev->receiver);
/* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_TEAR_DOWN &&
+ if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&mdev->receiver);
@@ -1251,6 +1345,15 @@
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
drbd_resume_al(mdev);
+ /* remember last connect and attach times so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
+ mdev->last_reconnect_jif = jiffies;
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
ascw->os = os;
@@ -1354,12 +1457,16 @@
/* Here we have the actions that are performed after a
state change. This function might sleep */
+ if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
nsm.i = -1;
if (ns.susp_nod) {
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
what = resend;
- if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
what = restart_frozen_disk_io;
if (what != nothing)
@@ -1408,7 +1515,7 @@
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
@@ -1441,11 +1548,11 @@
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
}
-
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
@@ -1473,14 +1580,14 @@
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1490,10 +1597,10 @@
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1513,33 +1620,38 @@
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh;
- int was_io_error;
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
/* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS,
- * so it is safe to dreference ldev here. */
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+ * our cleanup here with the transition to D_DISKLESS.
+ * But it is still not safe to dereference ldev here, since
+ * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ eh = mdev->ldev->dc.on_io_error;
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
+ /* Immediately allow completion of all application IO that waits
+ for completion from the local disk. */
+ tl_abort_disk_io(mdev);
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I am detaching my disk\n");
- else
- dev_err(DEV, "Sending state for detaching disk failed\n");
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
- drbd_rs_cancel_all(mdev);
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
put_ldev(mdev);
if (was_io_error && eh == EP_CALL_HELPER)
@@ -1561,16 +1673,17 @@
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I'm now diskless.\n");
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
/* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
put_ldev(mdev);
}
/* Notify peer that I had a local IO error, and did not detach. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
- drbd_send_state(mdev);
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1588,7 +1701,13 @@
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
+
+ /* Wake up role changes, that were delayed because of connection establishing */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &mdev->flags);
+ wake_up(&mdev->state_wait);
+ }
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
@@ -1598,8 +1717,8 @@
* No harm done if some bits change during this phase.
*/
if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
- "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
put_ldev(mdev);
}
@@ -2057,7 +2176,11 @@
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (uuid && uuid != UUID_JUST_CREATED)
+ uuid = uuid + UUID_NEW_BM_OFFSET;
+ else
+ get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
@@ -2089,6 +2212,10 @@
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
+ /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
+ if (mdev->agreed_pro_version <= 94)
+ max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
@@ -2102,10 +2229,10 @@
}
/**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
* @mdev: DRBD device.
*/
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
{
struct socket *sock;
struct p_state p;
@@ -2131,6 +2258,37 @@
return ok;
}
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+ struct socket *sock;
+ struct p_state p;
+ int ok = 0;
+
+ mutex_lock(&mdev->data.mutex);
+
+ p.state = cpu_to_be32(state.i);
+ sock = mdev->data.socket;
+
+ if (likely(sock != NULL)) {
+ ok = _drbd_send_cmd(mdev, sock, P_STATE,
+ (struct p_header80 *)&p, sizeof(p), 0);
+ }
+
+ mutex_unlock(&mdev->data.mutex);
+
+ return ok;
+}
+
int drbd_send_state_req(struct drbd_conf *mdev,
union drbd_state mask, union drbd_state val)
{
@@ -2615,7 +2773,7 @@
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_no_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2629,7 +2787,7 @@
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2695,8 +2853,7 @@
p.sector = cpu_to_be64(req->sector);
p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(req->seq_num =
- atomic_add_return(1, &mdev->packet_seq));
+ p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
@@ -2987,8 +3144,8 @@
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
+ atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
mutex_init(&mdev->meta.mutex);
sema_init(&mdev->data.work.s, 0);
@@ -3126,6 +3283,10 @@
/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ if (drbd_md_io_bio_set)
+ bioset_free(drbd_md_io_bio_set);
+ if (drbd_md_io_page_pool)
+ mempool_destroy(drbd_md_io_page_pool);
if (drbd_ee_mempool)
mempool_destroy(drbd_ee_mempool);
if (drbd_request_mempool)
@@ -3139,6 +3300,8 @@
if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache);
+ drbd_md_io_bio_set = NULL;
+ drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL;
drbd_request_mempool = NULL;
drbd_ee_cache = NULL;
@@ -3162,6 +3325,8 @@
drbd_bm_ext_cache = NULL;
drbd_al_ext_cache = NULL;
drbd_pp_pool = NULL;
+ drbd_md_io_page_pool = NULL;
+ drbd_md_io_bio_set = NULL;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -3185,6 +3350,16 @@
goto Enomem;
/* mempools */
+#ifdef COMPAT_HAVE_BIOSET_CREATE
+ drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_bio_set == NULL)
+ goto Enomem;
+#endif
+
+ drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_page_pool == NULL)
+ goto Enomem;
+
drbd_request_mempool = mempool_create(number,
mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
if (drbd_request_mempool == NULL)
@@ -3262,6 +3437,8 @@
if (!mdev)
return;
+ del_timer_sync(&mdev->request_timer);
+
/* paranoia asserts */
if (mdev->open_cnt != 0)
dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
@@ -3666,8 +3843,10 @@
if (!get_ldev_if_state(mdev, D_FAILED))
return;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
+
memset(buffer, 0, 512);
buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3698,7 +3877,8 @@
* since we updated it on metadata. */
mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+out:
put_ldev(mdev);
}
@@ -3718,8 +3898,9 @@
if (!get_ldev_if_state(mdev, D_ATTACHING))
return ERR_IO_MD_DISK;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
@@ -3780,7 +3961,8 @@
mdev->sync_conf.al_extents = 127;
err:
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+ out:
put_ldev(mdev);
return rv;
@@ -4183,12 +4365,11 @@
static char buildtag[38] = "\0uilt-in";
if (buildtag[0] == 0) {
-#ifdef CONFIG_MODULES
- if (THIS_MODULE != NULL)
- sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
- else
+#ifdef MODULE
+ sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+ buildtag[0] = 'b';
#endif
- buildtag[0] = 'b';
}
return buildtag;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 946166e..6d4de6a 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -289,7 +289,7 @@
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS) {
+ if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
@@ -432,7 +432,7 @@
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
drbd_md_sync(mdev);
@@ -845,9 +845,10 @@
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94)
- peer = mdev->peer_max_bio_size;
- else if (mdev->agreed_pro_version == 94)
+ if (mdev->agreed_pro_version < 94) {
+ peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
+ } else if (mdev->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
else /* drbd 8.3.8 onwards */
peer = DRBD_MAX_BIO_SIZE;
@@ -1032,7 +1033,7 @@
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1046,7 +1047,7 @@
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
- retcode = ERR_MD_DISK_TO_SMALL;
+ retcode = ERR_MD_DISK_TOO_SMALL;
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
@@ -1057,7 +1058,7 @@
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1138,7 +1139,7 @@
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
@@ -1336,17 +1337,34 @@
{
enum drbd_ret_code retcode;
int ret;
+ struct detach dt = {};
+
+ if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
+ reply->ret_code = ERR_MANDATORY_TAG;
+ goto out;
+ }
+
+ if (dt.detach_force) {
+ drbd_force_state(mdev, NS(disk, D_FAILED));
+ reply->ret_code = SS_SUCCESS;
+ goto out;
+ }
+
drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+ drbd_md_put_buffer(mdev);
/* D_FAILED will transition to DISKLESS. */
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
+
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
reply->ret_code = retcode;
+out:
return 0;
}
@@ -1711,7 +1729,7 @@
if (rs.no_resync && mdev->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
- goto fail;
+ goto fail_ldev;
}
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
@@ -1738,6 +1756,10 @@
fail:
reply->ret_code = retcode;
return 0;
+
+ fail_ldev:
+ put_ldev(mdev);
+ goto fail;
}
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -1941,6 +1963,7 @@
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -1959,6 +1982,7 @@
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -1980,6 +2004,7 @@
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -1998,6 +2023,7 @@
} else
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -2170,11 +2196,13 @@
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
/* w_make_ov_request expects position to be aligned */
mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ drbd_resume_io(mdev);
return 0;
}
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2959cdf..869bada 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -52,7 +52,7 @@
if (unlikely(v >= 1000000)) {
/* cool: > GiByte/s */
seq_printf(seq, "%ld,", v / 1000000);
- v /= 1000000;
+ v %= 1000000;
seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
} else if (likely(v >= 1000))
seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
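
The one-character change above is what makes the thousands-separated /proc/drbd rate output correct once the value exceeds 10^6: the old code reused the quotient where it needed the remainder. A small standalone check of both variants (plain C, no kernel dependencies; the helper name is made up):

#include <stdio.h>

/* Emulate the rate formatting for v >= 1000000. */
static void fmt_rate(long v, char *buf, int patched)
{
	long head = v / 1000000;
	long rest = patched ? v % 1000000 : v / 1000000; /* patched vs. old code */
	sprintf(buf, "%ld,%03ld,%03ld", head, rest / 1000, rest % 1000);
}

int main(void)
{
	char before[32], after[32];
	fmt_rate(1234567, before, 0);
	fmt_rate(1234567, after, 1);
	printf("old: %s   patched: %s\n", before, after);
	/* old: 1,000,001   patched: 1,234,567 */
	return 0;
}
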
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 436f519..ea4836e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -466,6 +466,7 @@
goto out;
}
(*newsock)->ops = sock->ops;
+ __module_get((*newsock)->ops->owner);
out:
return err;
@@ -750,6 +751,7 @@
{
struct socket *s, *sock, *msock;
int try, h, ok;
+ enum drbd_state_rv rv;
D_ASSERT(!mdev->data.socket);
@@ -888,25 +890,32 @@
}
}
- if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
- return 0;
-
sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0;
- drbd_thread_start(&mdev->asender);
-
if (drbd_send_protocol(mdev) == -1)
return -1;
+ set_bit(STATE_SENT, &mdev->flags);
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+
+ spin_lock_irq(&mdev->req_lock);
+ rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
+ if (mdev->state.conn != C_WF_REPORT_PARAMS)
+ clear_bit(STATE_SENT, &mdev->flags);
+ spin_unlock_irq(&mdev->req_lock);
+
+ if (rv < SS_SUCCESS)
+ return 0;
+
+ drbd_thread_start(&mdev->asender);
mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return 1;
@@ -957,7 +966,7 @@
rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
NULL);
if (rv) {
- dev_err(DEV, "local disk flush failed with status %d\n", rv);
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
@@ -1001,13 +1010,14 @@
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
- test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+ (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
spin_lock(&mdev->epoch_lock);
}
- dec_unacked(mdev);
+ if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+ dec_unacked(mdev);
if (mdev->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1096,7 +1106,11 @@
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
- * request in more than one bio. */
+ * request in more than one bio.
+ *
+ * Plain bio_alloc is good enough here; this is not a DRBD-internally
+ * generated bio, but one allocated on behalf of the peer.
+ */
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
@@ -1583,6 +1597,24 @@
return ok;
}
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+{
+
+ struct drbd_epoch_entry *rs_e;
+ bool rv = 0;
+
+ spin_lock_irq(&mdev->req_lock);
+ list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
+ if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ rv = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(&mdev->req_lock);
+
+ return rv;
+}
+
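
The helper added above serializes an incoming application write against in-flight resync writes by checking the sync_ee list for sector-range overlap; overlaps() is the usual interval-intersection test. A standalone sketch of that test, assuming sizes are given in bytes and normalized to 512-byte sectors (how the real macro takes its arguments is not shown in this hunk):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Two requests overlap iff neither one ends before the other starts. */
static bool ranges_overlap(sector_t s1, unsigned size1,
			   sector_t s2, unsigned size2)
{
	return s1 < s2 + (size2 >> 9) && s2 < s1 + (size1 >> 9);
}

int main(void)
{
	/* 4 KiB write at sector 8 vs. 4 KiB resync write at sector 12: overlap */
	printf("%d\n", ranges_overlap(8, 4096, 12, 4096));	/* 1 */
	/* same write vs. resync write at sector 16: adjacent, no overlap */
	printf("%d\n", ranges_overlap(8, 4096, 16, 4096));	/* 0 */
	return 0;
}
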
/* Called from receive_Data.
* Synchronize packets on sock with packets on msock.
*
@@ -1826,6 +1858,9 @@
list_add(&e->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->req_lock);
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+
switch (mdev->net_conf->wire_protocol) {
case DRBD_PROT_C:
inc_unacked(mdev);
@@ -2420,7 +2455,7 @@
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
- dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+ dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
return -1;
@@ -2806,10 +2841,10 @@
if (apv >= 88) {
if (apv == 88) {
- if (data_size > SHARED_SECRET_MAX) {
- dev_err(DEV, "verify-alg too long, "
- "peer wants %u, accepting only %u byte\n",
- data_size, SHARED_SECRET_MAX);
+ if (data_size > SHARED_SECRET_MAX || data_size == 0) {
+ dev_err(DEV, "verify-alg of wrong size, "
+ "peer wants %u, accepting only up to %u byte\n",
+ data_size, SHARED_SECRET_MAX);
return false;
}
@@ -3168,9 +3203,20 @@
os = ns = mdev->state;
spin_unlock_irq(&mdev->req_lock);
- /* peer says his disk is uptodate, while we think it is inconsistent,
- * and this happens while we think we have a sync going on. */
- if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+ /* If some other part of the code (asender thread, timeout)
+ * already decided to close the connection again,
+ * we must not "re-establish" it here. */
+ if (os.conn <= C_TEAR_DOWN)
+ return false;
+
+ /* If this is the "end of sync" confirmation, usually the peer disk
+ * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+ * set) resync started in PausedSyncT, or if the timing of pause-/
+ * unpause-sync events has been "just right", the peer disk may
+ * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+ */
+ if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+ real_peer_disk == D_UP_TO_DATE &&
os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
/* If we are (becoming) SyncSource, but peer is still in sync
* preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3288,7 +3334,7 @@
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
}
@@ -3776,6 +3822,13 @@
if (mdev->state.conn == C_STANDALONE)
return;
+ /* We are about to start the cleanup after connection loss.
+ * Make sure drbd_make_request knows about that.
+ * Usually we should be in some network failure state already,
+ * but just in case we are not, we fix it up here.
+ */
+ drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+
/* asender does not clean up anything. it must not interfere, either */
drbd_thread_stop(&mdev->asender);
drbd_free_sock(mdev);
@@ -3803,8 +3856,6 @@
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- del_timer(&mdev->request_timer);
-
/* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -4433,7 +4484,7 @@
if (mdev->state.conn == C_AHEAD &&
atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
mdev->start_resync_timer.expires = jiffies + HZ;
add_timer(&mdev->start_resync_timer);
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 4a0f314..9c5c849 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -37,6 +37,7 @@
const int rw = bio_data_dir(bio);
int cpu;
cpu = part_stat_lock();
+ part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -214,8 +215,7 @@
{
const unsigned long s = req->rq_state;
struct drbd_conf *mdev = req->mdev;
- /* only WRITES may end up here without a master bio (on barrier ack) */
- int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
+ int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -230,7 +230,7 @@
return;
if (s & RQ_NET_PENDING)
return;
- if (s & RQ_LOCAL_PENDING)
+ if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
return;
if (req->master_bio) {
@@ -277,6 +277,9 @@
req->master_bio = NULL;
}
+ if (s & RQ_LOCAL_PENDING)
+ return;
+
if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
/* this is disconnected (local only) operation,
* or protocol C P_WRITE_ACK,
@@ -429,7 +432,7 @@
break;
case completed_ok:
- if (bio_data_dir(req->master_bio) == WRITE)
+ if (req->rq_state & RQ_WRITE)
mdev->writ_cnt += req->size>>9;
else
mdev->read_cnt += req->size>>9;
@@ -438,7 +441,14 @@
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
+ break;
+
+ case abort_disk_io:
+ req->rq_state |= RQ_LOCAL_ABORTED;
+ if (req->rq_state & RQ_WRITE)
+ _req_may_be_done_not_susp(req, m);
+ else
+ goto goto_queue_for_net_read;
break;
case write_completed_with_error:
@@ -447,7 +457,6 @@
__drbd_chk_io_error(mdev, false);
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_ahead_completed_with_error:
@@ -455,7 +464,6 @@
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_completed_with_error:
@@ -467,7 +475,8 @@
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
__drbd_chk_io_error(mdev, false);
- put_ldev(mdev);
+
+ goto_queue_for_net_read:
/* no point in retrying if there is no good remote data,
* or we have no connection. */
@@ -556,10 +565,8 @@
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case oos_handed_to_network:
- /* actually the same */
+ case read_retry_remote_canceled:
case send_canceled:
- /* treat it the same */
case send_failed:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
@@ -589,17 +596,17 @@
}
req->rq_state &= ~RQ_NET_QUEUED;
req->rq_state |= RQ_NET_SENT;
- /* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
- * _drbd_send_zc_bio (drbd_send_dblock), we have to check
- * whether it is done already, and end it. */
_req_may_be_done_not_susp(req, m);
break;
- case read_retry_remote_canceled:
+ case oos_handed_to_network:
+ /* Was not set PENDING, no longer QUEUED, so is now DONE
+ * as far as this connection is concerned. */
req->rq_state &= ~RQ_NET_QUEUED;
- /* fall through, in case we raced with drbd_disconnect */
+ req->rq_state |= RQ_NET_DONE;
+ _req_may_be_done_not_susp(req, m);
+ break;
+
case connection_lost_while_pending:
/* transfer log cleanup after connection loss */
/* assert something? */
@@ -616,8 +623,6 @@
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case write_acked_by_peer_and_sis:
- req->rq_state |= RQ_NET_SIS;
case conflict_discarded_by_peer:
/* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
@@ -628,18 +633,15 @@
(unsigned long long)req->sector, req->size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
+ case write_acked_by_peer_and_sis:
case write_acked_by_peer:
+ if (what == write_acked_by_peer_and_sis)
+ req->rq_state |= RQ_NET_SIS;
/* protocol C; successfully written on peer.
- * Nothing to do here.
+ * Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
- * for volatile write-back caches on lower level devices.
- *
- * A barrier request is expected to have forced all prior
- * requests onto stable storage, so completion of a barrier
- * request could set NET_DONE right here, and not wait for the
- * P_BARRIER_ACK, but that is an unnecessary optimization. */
+ * for volatile write-back caches on lower level devices. */
- /* this makes it effectively the same as for: */
case recv_acked_by_peer:
/* protocol B; pretends to be successfully written on peer.
* see also notes above in handed_over_to_network about
@@ -773,6 +775,7 @@
int local, remote, send_oos = 0;
int err = -EIO;
int ret = 0;
+ union drbd_state s;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -834,8 +837,9 @@
drbd_al_begin_io(mdev, sector);
}
- remote = remote && drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+ s = mdev->state;
+ remote = remote && drbd_should_do_remote(s);
+ send_oos = rw == WRITE && drbd_should_send_oos(s);
D_ASSERT(!(remote && send_oos));
if (!(local || remote) && !is_susp(mdev->state)) {
@@ -867,7 +871,7 @@
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
- generic_make_request() to restart processing of this
+ drbd_make_request() to restart processing of this
bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
@@ -1091,7 +1095,6 @@
*/
D_ASSERT(bio->bi_size > 0);
D_ASSERT((bio->bi_size & 0x1ff) == 0);
- D_ASSERT(bio->bi_idx == 0);
/* to make some things easier, force alignment of requests within the
* granularity of our hash tables */
@@ -1099,8 +1102,9 @@
e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
if (likely(s_enr == e_enr)) {
- inc_ap_bio(mdev, 1);
- drbd_make_request_common(mdev, bio, start_time);
+ do {
+ inc_ap_bio(mdev, 1);
+ } while (drbd_make_request_common(mdev, bio, start_time));
return;
}
@@ -1196,36 +1200,66 @@
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_request *req; /* oldest request */
struct list_head *le;
- unsigned long et = 0; /* effective timeout = ko_count * timeout */
+ unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
+ unsigned long now;
if (get_net_conf(mdev)) {
- et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = mdev->net_conf->timeout*HZ/10
+ * mdev->net_conf->ko_count;
put_net_conf(mdev);
}
- if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+ if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
+ dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ put_ldev(mdev);
+ }
+ et = min_not_zero(dt, ent);
+
+ if (!et)
return; /* Recurring timer stopped */
+ now = jiffies;
+
spin_lock_irq(&mdev->req_lock);
le = &mdev->oldest_tle->requests;
if (list_empty(le)) {
spin_unlock_irq(&mdev->req_lock);
- mod_timer(&mdev->request_timer, jiffies + et);
+ mod_timer(&mdev->request_timer, now + et);
return;
}
le = le->prev;
req = list_entry(le, struct drbd_request, tl_requests);
- if (time_is_before_eq_jiffies(req->start_time + et)) {
- if (req->rq_state & RQ_NET_PENDING) {
- dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
- } else {
- dev_warn(DEV, "Local backing block device frozen?\n");
- mod_timer(&mdev->request_timer, jiffies + et);
- }
- } else {
- mod_timer(&mdev->request_timer, req->start_time + et);
- }
+ /* The request is considered timed out, if
+ * - we have some effective timeout from the configuration,
+ * with above state restrictions applied,
+ * - the oldest request is waiting for a response from the network
+ * resp. the local disk,
+ * - the oldest request is in fact older than the effective timeout,
+ * - the connection was established (resp. disk was attached)
+ * for longer than the timeout already.
+ * Note that for 32bit jiffies and very stable connections/disks,
+ * we may have a wrap-around, which is caught by
+ * !time_in_range(now, last_..._jif, last_..._jif + timeout).
+ *
+ * Side effect: once per 32bit wrap-around interval, which means every
+ * ~198 days with 250 HZ, we have a window where the timeout would need
+ * to expire twice (worst case) to become effective. Good enough.
+ */
+ if (ent && req->rq_state & RQ_NET_PENDING &&
+ time_after(now, req->start_time + ent) &&
+ !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ }
+ if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ time_after(now, req->start_time + dt) &&
+ !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
+ dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
+ __drbd_chk_io_error(mdev, 1);
+ }
+ nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&mdev->req_lock);
+ mod_timer(&mdev->request_timer, nt);
}
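
The rewritten timer handler above derives its effective timeout as the smaller non-zero of the network timeout (ko-count * timeout) and the new disk-timeout, and re-arms itself relative to the oldest request. A quick standalone illustration of that arithmetic, with values in jiffies and a min_not_zero() that mirrors the kernel helper of the same name (the concrete numbers are made up):

#include <stdio.h>

/* Smaller of two values, ignoring zeroes. */
static unsigned long min_not_zero(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned long ent = 6 * 250;	/* ko-count * timeout, e.g. 6 s at HZ=250 */
	unsigned long dt  = 0;		/* disk-timeout disabled */
	unsigned long et  = min_not_zero(dt, ent);
	unsigned long now = 100000, start = 99000;

	/* Re-arm either one full period after the oldest request started, or
	 * one period from now if that point already lies in the past. */
	unsigned long nt = (now > start + et ? now : start) + et;

	printf("effective timeout %lu jiffies, next check at %lu\n", et, nt);
	return 0;
}
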
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 68a234a..3d21119 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -105,6 +105,7 @@
read_completed_with_error,
read_ahead_completed_with_error,
write_completed_with_error,
+ abort_disk_io,
completed_ok,
resend,
fail_frozen_disk_io,
@@ -118,18 +119,21 @@
* same time, so we should hold the request lock anyways.
*/
enum drbd_req_state_bits {
- /* 210
- * 000: no local possible
- * 001: to be submitted
+ /* 3210
+ * 0000: no local possible
+ * 0001: to be submitted
* UNUSED, we could map: 011: submitted, completion still pending
- * 110: completed ok
- * 010: completed with error
+ * 0110: completed ok
+ * 0010: completed with error
+ * 1001: Aborted (before completion)
+ * 1x10: Aborted and completed -> free
*/
__RQ_LOCAL_PENDING,
__RQ_LOCAL_COMPLETED,
__RQ_LOCAL_OK,
+ __RQ_LOCAL_ABORTED,
- /* 76543
+ /* 87654
* 00000: no network possible
* 00001: to be send
* 00011: to be send, on worker queue
@@ -199,8 +203,9 @@
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK (1UL << __RQ_LOCAL_OK)
+#define RQ_LOCAL_ABORTED (1UL << __RQ_LOCAL_ABORTED)
-#define RQ_LOCAL_MASK ((RQ_LOCAL_OK << 1)-1) /* 0x07 */
+#define RQ_LOCAL_MASK ((RQ_LOCAL_ABORTED << 1)-1)
#define RQ_NET_PENDING (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED (1UL << __RQ_NET_QUEUED)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d3e6f6..620c70f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -70,11 +70,29 @@
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
+ struct drbd_conf *mdev;
md_io = (struct drbd_md_io *)bio->bi_private;
+ mdev = container_of(md_io, struct drbd_conf, md_io);
+
md_io->error = error;
- complete(&md_io->event);
+ /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+ * to timeout on the lower level device, and eventually detach from it.
+ * If this io completion runs after that timeout expired, this
+ * drbd_md_put_buffer() may allow us to finally try and re-attach.
+ * During normal operation, this only puts that extra reference
+ * down to 1 again.
+ * Make sure we first drop the reference, and only then signal
+ * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+ * next drbd_md_sync_page_io(), that we trigger the
+ * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+ */
+ drbd_md_put_buffer(mdev);
+ md_io->done = 1;
+ wake_up(&mdev->misc_wait);
+ bio_put(bio);
+ put_ldev(mdev);
}
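
The completion handler above now drops its extra reference before publishing "done", so a waiter woken on misc_wait cannot race ahead and trip the md_io_in_use assertion mentioned in the comment. A minimal userspace sketch of that ordering, using C11 atomics in place of the kernel's atomic_t and wait queue (names mirror the fields only for readability):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for mdev->md_io_in_use and md_io->done. */
static atomic_int md_io_in_use;
static atomic_int md_io_done;

/* Put the extra reference *before* signalling completion, so a woken
 * waiter never observes the counter still elevated. */
static void md_io_complete(void)
{
	atomic_fetch_sub(&md_io_in_use, 1);	/* drbd_md_put_buffer() */
	atomic_store(&md_io_done, 1);		/* md_io->done = 1; wake_up(...) */
}

int main(void)
{
	atomic_store(&md_io_in_use, 1);		/* waiter's ref, drbd_md_get_buffer() */
	atomic_fetch_add(&md_io_in_use, 1);	/* extra ref taken before submitting the bio */
	md_io_complete();
	printf("done=%d in_use=%d\n",
	       atomic_load(&md_io_done), atomic_load(&md_io_in_use));
	/* done=1 in_use=1 */
	return 0;
}
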
/* reads on behalf of the partner,
@@ -226,6 +244,7 @@
spin_lock_irqsave(&mdev->req_lock, flags);
__req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->req_lock, flags);
+ put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
@@ -728,7 +747,7 @@
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
return 1;
}
@@ -1519,14 +1538,14 @@
}
drbd_state_lock(mdev);
-
+ write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ write_unlock_irq(&global_state_lock);
drbd_state_unlock(mdev);
return;
}
- write_lock_irq(&global_state_lock);
- ns = mdev->state;
+ ns.i = mdev->state.i;
ns.aftr_isp = !_drbd_may_sync_now(mdev);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b0b00d7..cce7df3 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -551,7 +551,7 @@
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
-static void floppy_shutdown(unsigned long);
+static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
@@ -588,6 +588,8 @@
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc; /* current fdc */
+static struct workqueue_struct *floppy_wq;
+
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
@@ -629,16 +631,15 @@
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
-typedef void (*timeout_fn)(unsigned long);
-static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
static void is_alive(const char *func, const char *message)
{
/* this routine checks whether the floppy driver is "alive" */
if (test_bit(0, &fdc_busy) && command_status < 2 &&
- !timer_pending(&fd_timeout)) {
+ !delayed_work_pending(&fd_timeout)) {
DPRINT("%s: timeout handler died. %s\n", func, message);
}
}
@@ -666,15 +667,18 @@
static void __reschedule_timeout(int drive, const char *message)
{
+ unsigned long delay;
+
if (drive == current_reqD)
drive = current_drive;
- del_timer(&fd_timeout);
+
if (drive < 0 || drive >= N_DRIVE) {
- fd_timeout.expires = jiffies + 20UL * HZ;
+ delay = 20UL * HZ;
drive = 0;
} else
- fd_timeout.expires = jiffies + UDP->timeout;
- add_timer(&fd_timeout);
+ delay = UDP->timeout;
+
+ queue_delayed_work(floppy_wq, &fd_timeout, delay);
if (UDP->flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
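
The floppy changes above replace bare kernel timers with delayed work on a private ordered workqueue, so timeout handling and the regular work item are serialized in one context. A bare-bones module sketch of that pattern, with made-up names, kept deliberately minimal (the real driver additionally cancels and re-queues the work from several paths):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;

static void demo_timeout(struct work_struct *work)
{
	pr_info("demo: timeout fired\n");
}

/* DECLARE_DELAYED_WORK replaces the old DEFINE_TIMER; re-arming is done
 * with queue_delayed_work() instead of mod_timer()/add_timer(). */
static DECLARE_DELAYED_WORK(demo_timeout_work, demo_timeout);

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo", 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_delayed_work(demo_wq, &demo_timeout_work, 20UL * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_timeout_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
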
@@ -872,7 +876,7 @@
command_status = FD_COMMAND_NONE;
- __reschedule_timeout(drive, "lock fdc");
+ reschedule_timeout(drive, "lock fdc");
set_fdc(drive);
return 0;
}
@@ -880,23 +884,15 @@
/* unlocks the driver */
static void unlock_fdc(void)
{
- unsigned long flags;
-
- raw_cmd = NULL;
if (!test_bit(0, &fdc_busy))
DPRINT("FDC access conflict!\n");
- if (do_floppy)
- DPRINT("device interrupt still active at FDC release: %pf!\n",
- do_floppy);
+ raw_cmd = NULL;
command_status = FD_COMMAND_NONE;
- spin_lock_irqsave(&floppy_lock, flags);
- del_timer(&fd_timeout);
+ __cancel_delayed_work(&fd_timeout);
+ do_floppy = NULL;
cont = NULL;
clear_bit(0, &fdc_busy);
- if (current_req || set_next_request())
- do_fd_request(current_req->q);
- spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
}
@@ -968,26 +964,24 @@
static void schedule_bh(void (*handler)(void))
{
+ WARN_ON(work_pending(&floppy_work));
+
PREPARE_WORK(&floppy_work, (work_func_t)handler);
- schedule_work(&floppy_work);
+ queue_work(floppy_wq, &floppy_work);
}
-static DEFINE_TIMER(fd_timer, NULL, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timer, NULL);
static void cancel_activity(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&floppy_lock, flags);
do_floppy = NULL;
- PREPARE_WORK(&floppy_work, (work_func_t)empty);
- del_timer(&fd_timer);
- spin_unlock_irqrestore(&floppy_lock, flags);
+ cancel_delayed_work_sync(&fd_timer);
+ cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
* transfer */
-static void fd_watchdog(void)
+static void fd_watchdog(struct work_struct *arg)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
@@ -997,21 +991,20 @@
cont->done(0);
reset_fdc();
} else {
- del_timer(&fd_timer);
- fd_timer.function = (timeout_fn)fd_watchdog;
- fd_timer.expires = jiffies + HZ / 10;
- add_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+ queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
static void main_command_interrupt(void)
{
- del_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+static int fd_wait_for_completion(unsigned long expires, work_func_t function)
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time
@@ -1020,11 +1013,10 @@
return 1;
}
- if (time_before(jiffies, delay)) {
- del_timer(&fd_timer);
- fd_timer.function = function;
- fd_timer.expires = delay;
- add_timer(&fd_timer);
+ if (time_before(jiffies, expires)) {
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, function);
+ queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
return 0;
@@ -1342,7 +1334,7 @@
*/
FDCS->dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
- (timeout_fn)floppy_ready);
+ (work_func_t)floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
@@ -1447,7 +1439,7 @@
int flags;
int dflags;
unsigned long ready_date;
- timeout_fn function;
+ work_func_t function;
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1461,9 +1453,9 @@
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
- function = (timeout_fn)floppy_start;
+ function = (work_func_t)floppy_start;
} else
- function = (timeout_fn)setup_rw_floppy;
+ function = (work_func_t)setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
@@ -1493,7 +1485,7 @@
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
- fd_watchdog();
+ fd_watchdog(NULL);
}
static int blind_seek;
@@ -1802,20 +1794,22 @@
pr_info("do_floppy=%pf\n", do_floppy);
if (work_pending(&floppy_work))
pr_info("floppy_work.func=%pf\n", floppy_work.func);
- if (timer_pending(&fd_timer))
- pr_info("fd_timer.function=%pf\n", fd_timer.function);
- if (timer_pending(&fd_timeout)) {
- pr_info("timer_function=%pf\n", fd_timeout.function);
- pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
- pr_info("now=%lu\n", jiffies);
- }
+ if (delayed_work_pending(&fd_timer))
+ pr_info("delayed work.function=%p expires=%ld\n",
+ fd_timer.work.func,
+ fd_timer.timer.expires - jiffies);
+ if (delayed_work_pending(&fd_timeout))
+ pr_info("timer_function=%p expires=%ld\n",
+ fd_timeout.work.func,
+ fd_timeout.timer.expires - jiffies);
+
pr_info("cont=%p\n", cont);
pr_info("current_req=%p\n", current_req);
pr_info("command_status=%d\n", command_status);
pr_info("\n");
}
-static void floppy_shutdown(unsigned long data)
+static void floppy_shutdown(struct work_struct *arg)
{
unsigned long flags;
@@ -1868,7 +1862,7 @@
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
- (timeout_fn)function);
+ (work_func_t)function);
}
static void floppy_ready(void)
@@ -2821,7 +2815,6 @@
spin_lock_irq(&floppy_lock);
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
-
if (!pending) {
do_floppy = NULL;
unlock_fdc();
@@ -2898,13 +2891,15 @@
current_req->cmd_flags))
return;
- if (test_bit(0, &fdc_busy)) {
+ if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return;
}
- lock_fdc(MAXTIMEOUT, false);
+ command_status = FD_COMMAND_NONE;
+ __reschedule_timeout(MAXTIMEOUT, "fd_request");
+ set_fdc(0);
process_fd_request();
is_alive(__func__, "");
}
@@ -3612,9 +3607,7 @@
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else if (!UDRS->fd_ref--) {
+ if (!UDRS->fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
UDRS->fd_ref = 0;
}
@@ -3650,13 +3643,7 @@
set_bit(FD_VERIFY_BIT, &UDRS->flags);
}
- if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
- goto out2;
-
- if (mode & FMODE_EXCL)
- UDRS->fd_ref = -1;
- else
- UDRS->fd_ref++;
+ UDRS->fd_ref++;
opened_bdev[drive] = bdev;
@@ -3719,10 +3706,8 @@
mutex_unlock(&floppy_mutex);
return 0;
out:
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else
- UDRS->fd_ref--;
+ UDRS->fd_ref--;
+
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
out2:
@@ -4159,10 +4144,16 @@
goto out_put_disk;
}
+ floppy_wq = alloc_ordered_workqueue("floppy", 0);
+ if (!floppy_wq) {
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+
disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!disks[dr]->queue) {
err = -ENOMEM;
- goto out_put_disk;
+ goto out_destroy_workq;
}
blk_queue_max_hw_sectors(disks[dr]->queue, 64);
@@ -4213,7 +4204,7 @@
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
@@ -4224,7 +4215,7 @@
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
@@ -4281,13 +4272,13 @@
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
DPRINT("no floppy controllers found\n");
err = have_no_fdc;
- goto out_flush_work;
+ goto out_release_dma;
}
for (drive = 0; drive < N_DRIVE; drive++) {
@@ -4302,7 +4293,7 @@
err = platform_device_register(&floppy_device[drive]);
if (err)
- goto out_flush_work;
+ goto out_release_dma;
err = device_create_file(&floppy_device[drive].dev,
&dev_attr_cmos);
@@ -4320,13 +4311,14 @@
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
-out_flush_work:
- flush_work_sync(&floppy_work);
+out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
+out_destroy_workq:
+ destroy_workqueue(floppy_wq);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4397,7 +4389,7 @@
* We might have scheduled a free_irq(), wait it to
* drain first:
*/
- flush_work_sync(&floppy_work);
+ flush_workqueue(floppy_wq);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
@@ -4488,9 +4480,9 @@
pr_info("motor off timer %d still active\n", drive);
#endif
- if (timer_pending(&fd_timeout))
+ if (delayed_work_pending(&fd_timeout))
pr_info("floppy timer still active:%s\n", timeout_message);
- if (timer_pending(&fd_timer))
+ if (delayed_work_pending(&fd_timer))
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
@@ -4560,8 +4552,9 @@
put_disk(disks[drive]);
}
- del_timer_sync(&fd_timeout);
- del_timer_sync(&fd_timer);
+ cancel_delayed_work_sync(&fd_timeout);
+ cancel_delayed_work_sync(&fd_timer);
+ destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 013c7a5..65665c9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -141,7 +141,7 @@
struct rbd_snap {
struct device dev;
const char *name;
- size_t size;
+ u64 size;
struct list_head node;
u64 id;
};
@@ -175,8 +175,7 @@
/* protects updating the header */
struct rw_semaphore header_rwsem;
char snap_name[RBD_MAX_SNAP_NAME_LEN];
- u32 cur_snap; /* index+1 of current snapshot within snap context
- 0 - for the head */
+ u64 snap_id; /* current snapshot id */
int read_only;
struct list_head node;
@@ -241,7 +240,7 @@
put_device(&rbd_dev->dev);
}
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -450,7 +449,9 @@
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("rbd_release_client %p\n", rbdc);
+ spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
+ spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@
*/
static void rbd_put_client(struct rbd_device *rbd_dev)
{
- spin_lock(&rbd_client_list_lock);
kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- spin_unlock(&rbd_client_list_lock);
rbd_dev->rbd_client = NULL;
}
@@ -487,16 +486,18 @@
*/
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
- int allocated_snaps,
+ u32 allocated_snaps,
gfp_t gfp_flags)
{
- int i;
- u32 snap_count;
+ u32 i, snap_count;
if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
return -ENXIO;
snap_count = le32_to_cpu(ondisk->snap_count);
+ if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+ / sizeof (*ondisk))
+ return -EINVAL;
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
snap_count * sizeof (*ondisk),
gfp_flags);
@@ -506,11 +507,11 @@
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
if (snap_count) {
header->snap_names = kmalloc(header->snap_names_len,
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_names)
goto err_snapc;
header->snap_sizes = kmalloc(snap_count * sizeof(u64),
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_sizes)
goto err_names;
} else {
@@ -552,21 +553,6 @@
return -ENOMEM;
}
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
- return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
- struct rbd_image_header *header = &rbd_dev->header;
-
- if (!rbd_dev->cur_snap)
- return 0;
-
- return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
u64 *seq, u64 *size)
{
@@ -605,7 +591,7 @@
snapc->seq = header->snap_seq;
else
snapc->seq = 0;
- dev->cur_snap = 0;
+ dev->snap_id = CEPH_NOSNAP;
dev->read_only = 0;
if (size)
*size = header->image_size;
@@ -613,8 +599,7 @@
ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
if (ret < 0)
goto done;
-
- dev->cur_snap = header->total_snaps - ret;
+ dev->snap_id = snapc->seq;
dev->read_only = 1;
}
@@ -935,7 +920,6 @@
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_preferred = cpu_to_le32(-1);
layout->fl_pg_pool = cpu_to_le32(dev->poolid);
ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
@@ -1168,7 +1152,7 @@
int coll_index)
{
return rbd_do_op(rq, rbd_dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
2,
@@ -1187,7 +1171,7 @@
u64 *ver)
{
return rbd_req_sync_op(dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
NULL,
@@ -1238,7 +1222,7 @@
dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
notify_id, (int)opcode);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(dev);
+ rc = __rbd_refresh_header(dev);
mutex_unlock(&ctl_mutex);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@
coll, cur_seg);
else
rbd_req_read(rq, rbd_dev,
- cur_snap_id(rbd_dev),
+ rbd_dev->snap_id,
ofs,
op_size, bio,
coll, cur_seg);
@@ -1592,7 +1576,7 @@
{
ssize_t rc;
struct rbd_image_header_ondisk *dh;
- int snap_count = 0;
+ u32 snap_count = 0;
u64 ver;
size_t len;
@@ -1656,7 +1640,7 @@
struct ceph_mon_client *monc;
/* we should create a snapshot only if we're pointing at the head */
- if (dev->cur_snap)
+ if (dev->snap_id != CEPH_NOSNAP)
return -EINVAL;
monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@
if (ret < 0)
return ret;
- dev->header.snapc->seq = new_snapid;
+ down_write(&dev->header_rwsem);
+ dev->header.snapc->seq = new_snapid;
+ up_write(&dev->header_rwsem);
return 0;
bad:
@@ -1703,7 +1689,7 @@
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -1890,7 +1876,7 @@
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
if (rc < 0)
ret = rc;
@@ -1949,7 +1935,7 @@
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%zd\n", snap->size);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
}
static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@
rbd_dev->header.obj_version);
if (ret == -ERANGE) {
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
mutex_unlock(&ctl_mutex);
if (rc < 0)
return rc;
@@ -2558,7 +2544,7 @@
if (ret < 0)
goto err_unlock;
- ret = __rbd_update_snaps(rbd_dev);
+ ret = __rbd_refresh_header(rbd_dev);
if (ret < 0)
goto err_unlock;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4e86393..60eed4b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -526,6 +526,14 @@
return 0;
}
+static char *encode_disk_name(char *ptr, unsigned int n)
+{
+ if (n >= 26)
+ ptr = encode_disk_name(ptr, n / 26 - 1);
+ *ptr = 'a' + n % 26;
+ return ptr + 1;
+}
+
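
encode_disk_name() above produces the bijective base-26 suffix used for xvd device names (xvda ... xvdz, xvdaa, ...). Since the recursion is easy to misread, here is a standalone equivalent that prints a few sample mappings:

#include <stdio.h>

/* Same recursion as the blkfront helper: bijective base-26, 'a' == 0. */
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

int main(void)
{
	unsigned int samples[] = { 0, 25, 26, 27, 701, 702 };
	char name[32];
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		char *end = encode_disk_name(name, samples[i]);
		*end = '\0';
		printf("%u -> xvd%s\n", samples[i], name);
	}
	/* 0 -> xvda, 25 -> xvdz, 26 -> xvdaa, 27 -> xvdab,
	 * 701 -> xvdzz, 702 -> xvdaaa */
	return 0;
}
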
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
u16 vdisk_info, u16 sector_size)
@@ -536,6 +544,7 @@
unsigned int offset;
int minor;
int nr_parts;
+ char *ptr;
BUG_ON(info->gd != NULL);
BUG_ON(info->rq != NULL);
@@ -560,7 +569,11 @@
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
}
- err = -ENODEV;
+ if (minor >> MINORBITS) {
+ pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
+ info->vdevice, minor);
+ return -ENODEV;
+ }
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -574,23 +587,14 @@
if (gd == NULL)
goto release;
- if (nr_minors > 1) {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
- else
- sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
- 'a' + ((offset / 26)-1), 'a' + (offset % 26));
- } else {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
- 'a' + offset,
- minor & (nr_parts - 1));
- else
- sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
- 'a' + ((offset / 26) - 1),
- 'a' + (offset % 26),
- minor & (nr_parts - 1));
- }
+ strcpy(gd->disk_name, DEV_NAME);
+ ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
+ BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
+ if (nr_minors > 1)
+ *ptr = 0;
+ else
+ snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
+ "%d", minor & (nr_parts - 1));
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
@@ -1496,7 +1500,9 @@
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront_driver);
+ xenbus_unregister_driver(&blkfront_driver);
+ unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+ kfree(minors);
}
module_exit(xlblk_exit);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 92cea9d..08a7aa7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2116,7 +2116,7 @@
return ret;
}
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
@@ -2185,7 +2185,7 @@
}
}
-static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
int ret, hsub, vsub, num_planes, i;
@@ -3126,7 +3126,7 @@
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
static bool drm_property_change_is_valid(struct drm_property *property,
- __u64 value)
+ uint64_t value)
{
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
@@ -3136,7 +3136,7 @@
return true;
} else if (property->flags & DRM_MODE_PROP_BITMASK) {
int i;
- __u64 valid_mask = 0;
+ uint64_t valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 608bddf..c3b5139 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -66,6 +66,8 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -120,6 +122,9 @@
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
/*** DDC fetch and block validation ***/
@@ -885,12 +890,19 @@
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
mode = drm_mode_create(dev);
if (!mode)
return NULL;
- mode->type = DRM_MODE_TYPE_DRIVER;
-
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
@@ -914,8 +926,6 @@
drm_mode_do_interlace_quirk(mode, pt);
- drm_mode_set_name(mode);
-
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -925,6 +935,7 @@
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@@ -938,6 +949,9 @@
mode->height_mm = edid->height_cm * 10;
}
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
return mode;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eb2b3c2..5363e9c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2032,6 +2032,8 @@
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c1e5c66..288d7b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2063,10 +2063,8 @@
if (obj->gtt_space == NULL)
return 0;
- if (obj->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return -EINVAL;
- }
+ if (obj->pin_count)
+ return -EBUSY;
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@@ -3293,6 +3291,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
+ u32 mask;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3303,8 +3302,15 @@
return NULL;
}
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ mapping_set_gfp_mask(mapping, mask);
i915_gem_info_add_obj(dev_priv, size);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cc4a633..1417660 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
- u8 new_delay = dev_priv->cur_delay;
u32 pm_iir, pm_imr;
+ u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
- if (!pm_iir)
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
- } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
- gen6_gt_force_wake_get(dev_priv);
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->min_delay) {
- new_delay = dev_priv->min_delay;
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
- ((new_delay << 16) & 0x3f0000));
- } else {
- /* Make sure we continue to get down interrupts
- * until we hit the minimum frequency */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
- }
- gen6_gt_force_wake_put(dev_priv);
- }
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->cur_delay + 1;
+ else
+ new_delay = dev_priv->cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
- dev_priv->cur_delay = new_delay;
- /*
- * rps_lock not held here because clearing is non-destructive. There is
- * an *extremely* unlikely race with gen6_rps_enable() that is prevented
- * by holding struct_mutex for the duration of the write.
- */
mutex_unlock(&dev_priv->dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ee61ad1..91478942 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -910,9 +910,10 @@
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc, bool state)
+ struct intel_pch_pll *pll,
+ struct intel_crtc *crtc,
+ bool state)
{
- int reg;
u32 val;
bool cur_state;
@@ -921,30 +922,37 @@
return;
}
- if (!intel_crtc->pch_pll) {
- WARN(1, "asserting PCH PLL enabled with no PLL\n");
+ if (WARN (!pll,
+ "asserting PCH PLL %s with no PLL\n", state_string(state)))
return;
- }
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ val = I915_READ(pll->pll_reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+ pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+ /* Make sure the selected PLL is correctly attached to the transcoder */
+ if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
pch_dpll = I915_READ(PCH_DPLL_SEL);
-
- /* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
- "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+ cur_state = pll->pll_reg == _PCH_DPLL_B;
+ if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+ "PLL[%d] not attached to this transcoder %d: %08x\n",
+ cur_state, crtc->pipe, pch_dpll)) {
+ cur_state = !!(val >> (4*crtc->pipe + 3));
+ WARN(cur_state != state,
+ "PLL[%d] not %s on this transcoder %d: %08x\n",
+ pll->pll_reg == _PCH_DPLL_B,
+ state_string(state),
+ crtc->pipe,
+ val);
+ }
}
-
- reg = intel_crtc->pch_pll->pll_reg;
- val = I915_READ(reg);
- cur_state = !!(val & DPLL_VCO_ENABLE);
- WARN(cur_state != state,
- "PCH PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
}
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@
assert_pch_refclk_enabled(dev_priv);
if (pll->active++ && pll->on) {
- assert_pch_pll_enabled(dev_priv, intel_crtc);
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
@@ -1460,12 +1468,12 @@
intel_crtc->base.base.id);
if (WARN_ON(pll->active == 0)) {
- assert_pch_pll_disabled(dev_priv, intel_crtc);
+ assert_pch_pll_disabled(dev_priv, pll, NULL);
return;
}
if (--pll->active) {
- assert_pch_pll_enabled(dev_priv, intel_crtc);
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
@@ -1495,7 +1503,9 @@
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
- assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+ assert_pch_pll_enabled(dev_priv,
+ to_intel_crtc(crtc)->pch_pll,
+ to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 71c7096..296cfc2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -266,6 +266,9 @@
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -702,6 +705,9 @@
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1160,10 @@
DRM_DEBUG_KMS("Turn eDP power off\n");
- WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
- ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1266,18 +1271,16 @@
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- /* Wake up the sink first */
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
-
- /* Make sure the panel is off before trying to
- * change the mode
- */
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1312,11 @@
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4a9707d..1991a44 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -396,11 +396,22 @@
* Wait for bus to IDLE before clearing NAK.
* If we clear the NAK while bus is still active, then it will stay
* active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiesces -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
*/
+ ret = -ENXIO;
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10))
+ 10)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
+ ret = -ETIMEDOUT;
+ }
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
- /*
- * If no ACK is received during the address phase of a transaction,
- * the adapter must report -ENXIO.
- * It is not clear what to return if no ACK is received at other times.
- * So, we always return -ENXIO in all NAK cases, to ensure we send
- * it at least during the one case that is specified.
- */
- ret = -ENXIO;
goto out;
timeout:
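
The error-code split above matters to callers: -ENXIO now means the slave NAKed its address (no device answered), while -ETIMEDOUT means the bus never went idle, which can happen with a slow DDC chip that only answers on a later attempt. A minimal sketch of a caller honouring that convention is shown below; demo_transfer_retry, the retry count and the delay are hypothetical and not part of this patch.

#include <linux/i2c.h>
#include <linux/delay.h>

/*
 * Retry a transfer a few times, but give up immediately on -ENXIO:
 * the address was NAKed, nothing is listening, so retrying is pointless.
 * Must be called from sleepable context.
 */
static int demo_transfer_retry(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	int tries, ret = -ETIMEDOUT;

	for (tries = 0; tries < 3; tries++) {
		ret = i2c_transfer(adap, msgs, num);
		if (ret >= 0 || ret == -ENXIO)
			return ret;
		usleep_range(1000, 2000);
	}

	return ret;
}
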
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9dee823..08eb04c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -747,6 +747,14 @@
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff6..d0ce2a5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
+ u32 limits;
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
+ limits = 0;
+ if (val >= dev_priv->max_delay)
+ val = dev_priv->max_delay;
+ else
+ limits |= dev_priv->max_delay << 24;
+
+ if (val <= dev_priv->min_delay)
+ val = dev_priv->min_delay;
+ else
+ limits |= dev_priv->min_delay << 16;
+
+ if (val == dev_priv->cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ dev_priv->cur_delay = val;
}
void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_cap;
+ u32 gt_perf_status;
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
int rc6_mode;
int i;
@@ -2352,6 +2374,14 @@
gen6_gt_force_wake_get(dev_priv);
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = rp_state_cap & 0xff;
+ dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->cur_delay = 0;
+
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2399,8 +2429,8 @@
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
+ dev_priv->max_delay << 24 |
+ dev_priv->min_delay << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -2440,14 +2466,11 @@
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
+ dev_priv->max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@
limits |= (dev_priv->min_delay & 0x3f) << 16;
if (old != limits) {
- DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
- limits, old);
+ /* Note that the known failure case is to read back 0. */
+ DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+ "expected %08x, was %08x\n", limits, old);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a949b73..b6a9d45 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -783,10 +783,12 @@
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- dtd->part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- dtd->part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@
mode->clock = dtd->part1.clock * 10;
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- if (dtd->part2.dtd_flags & 0x2)
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dtd->part2.dtd_flags & 0x4)
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f..9d03014 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@
u16 output_flags;
} __attribute__((packed));
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3346612..a233a51 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@
.filter_table = filter_table,
},
{
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -1194,6 +1242,11 @@
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b01c2dd..ce4e7cc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -865,7 +865,7 @@
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 1 << 4;
+ rdev->config.cayman.tile_config |= 1 << 4;
else
rdev->config.cayman.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1dc3a4a..492654f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -848,7 +848,6 @@
s32 priority;
};
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f6e69b8..b1e3820 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -444,7 +444,9 @@
*/
if ((dev->pdev->device == 0x9498) &&
(dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452)) {
+ (dev->pdev->subsystem_device == 0x2452) &&
+ (i2c_bus->valid == false) &&
+ !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c7d64a7..0137689 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -580,7 +580,7 @@
return 0;
}
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
int new_page;
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
@@ -623,3 +623,28 @@
return new_page;
}
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 493a7be..983658c 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -39,31 +39,6 @@
*/
int radeon_debugfs_sa_init(struct radeon_device *rdev);
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
-
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size)
{
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 40efd32..97acc9c 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -234,7 +234,7 @@
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
- return ret;
+ goto out;
if (!gobj->base.map_list.map) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -257,8 +257,6 @@
{
struct udl_gem_object *obj;
int npages;
- int i;
- struct scatterlist *iter;
npages = size / PAGE_SIZE;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7cd9bf4..6f1d167 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1036,8 +1036,9 @@
config SENSORS_SCH5627
tristate "SMSC SCH5627"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
If you say yes here you get support for the hardware monitoring
features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1049,9 @@
config SENSORS_SCH5636
tristate "SMSC SCH5636"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
SMSC SCH5636 Super I/O chips include an embedded microcontroller for
hardware monitoring solutions, allowing motherboard manufacturers to
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8ec6dfb..8342275 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -579,7 +579,7 @@
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(build_code << 24) | (build_id << 8) | hwmon_rev,
&data->update_lock, 1);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 906d4ed..96a7e68 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -510,7 +510,7 @@
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(revision[0] << 8) | revision[1],
&data->update_lock, 0);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index ce52fc5..4380f5d 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -66,15 +66,10 @@
struct sch56xx_watchdog_data {
u16 addr;
- u32 revision;
struct mutex *io_lock;
- struct mutex watchdog_lock;
- struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
- struct miscdevice watchdog_miscdev;
- unsigned long watchdog_is_open;
- char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
- char watchdog_expect_close;
+ struct watchdog_info wdinfo;
+ struct watchdog_device wddev;
u8 watchdog_preset;
u8 watchdog_control;
u8 watchdog_output_enable;
@@ -82,15 +77,6 @@
static struct platform_device *sch56xx_pdev;
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
/* Super I/O functions */
static inline int superio_inb(int base, int reg)
{
@@ -272,22 +258,22 @@
* Watchdog routines
*/
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/* Release our data struct when we're unregistered *and*
+ all references to our watchdog device are released */
+static void watchdog_release_resources(struct kref *r)
{
struct sch56xx_watchdog_data *data =
container_of(r, struct sch56xx_watchdog_data, kref);
kfree(data);
}
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
- int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
{
- int ret, resolution;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+ unsigned int resolution;
u8 control;
+ int ret;
/* 1 second or 60 second resolution? */
if (timeout <= 255)
@@ -298,12 +284,6 @@
if (timeout < resolution || timeout > (resolution * 255))
return -EINVAL;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
if (resolution == 1)
control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
else
@@ -316,7 +296,7 @@
control);
mutex_unlock(data->io_lock);
if (ret)
- goto leave;
+ return ret;
data->watchdog_control = control;
}
@@ -326,38 +306,17 @@
* the watchdog countdown.
*/
data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+ wddev->timeout = data->watchdog_preset * resolution;
- ret = data->watchdog_preset * resolution;
-leave:
- mutex_unlock(&data->watchdog_lock);
- return ret;
+ return 0;
}
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
{
- int timeout;
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
- timeout = data->watchdog_preset;
- else
- timeout = data->watchdog_preset * 60;
- mutex_unlock(&data->watchdog_lock);
-
- return timeout;
-}
-
-static int watchdog_start(struct sch56xx_watchdog_data *data)
-{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
u8 val;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave_unlock_watchdog;
- }
-
/*
* The sch56xx's watchdog cannot really be started / stopped
* it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@
if (ret)
goto leave;
- /* 2. Enable output (if not already enabled) */
- if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
- val = data->watchdog_output_enable |
- SCH56XX_WDOG_OUTPUT_ENABLE;
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- if (ret)
- goto leave;
+ /* 2. Enable output */
+ val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ if (ret)
+ goto leave;
- data->watchdog_output_enable = val;
- }
+ data->watchdog_output_enable = val;
/* 3. Clear the watchdog event bit if set */
val = inb(data->addr + 9);
@@ -405,234 +360,70 @@
leave:
mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
- mutex_unlock(&data->watchdog_lock);
return ret;
}
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
/* Reset the watchdog countdown counter */
mutex_lock(data->io_lock);
ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
data->watchdog_preset);
mutex_unlock(data->io_lock);
-leave:
- mutex_unlock(&data->watchdog_lock);
+
return ret;
}
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret = 0;
u8 val;
- if (!data->addr)
- return -ENODEV;
+ val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+ mutex_lock(data->io_lock);
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ mutex_unlock(data->io_lock);
+ if (ret)
+ return ret;
- if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
- val = data->watchdog_output_enable &
- ~SCH56XX_WDOG_OUTPUT_ENABLE;
- mutex_lock(data->io_lock);
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- mutex_unlock(data->io_lock);
- if (ret)
- return ret;
-
- data->watchdog_output_enable = val;
- }
-
- return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
- int ret;
-
- mutex_lock(&data->watchdog_lock);
- ret = watchdog_stop_unlocked(data);
- mutex_unlock(&data->watchdog_lock);
-
- return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (data->watchdog_expect_close) {
- watchdog_stop(data);
- data->watchdog_expect_close = 0;
- } else {
- watchdog_trigger(data);
- pr_crit("unexpected close, not stopping watchdog!\n");
- }
-
- clear_bit(0, &data->watchdog_is_open);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
-
+ data->watchdog_output_enable = val;
return 0;
}
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
{
- struct sch56xx_watchdog_data *pos, *data = NULL;
- int ret, watchdog_is_open;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- /*
- * We get called from drivers/char/misc.c with misc_mtx hold, and we
- * call misc_register() from sch56xx_watchdog_probe() with
- * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
- * lock, this is a possible deadlock, so we use mutex_trylock here.
- */
- if (!mutex_trylock(&watchdog_data_mutex))
- return -ERESTARTSYS;
- list_for_each_entry(pos, &watchdog_data_list, list) {
- if (pos->watchdog_miscdev.minor == iminor(inode)) {
- data = pos;
- break;
- }
- }
- /* Note we can never not have found data, so we don't check for this */
- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
- if (!watchdog_is_open)
- kref_get(&data->kref);
- mutex_unlock(&watchdog_data_mutex);
-
- if (watchdog_is_open)
- return -EBUSY;
-
- filp->private_data = data;
-
- /* Start the watchdog */
- ret = watchdog_start(data);
- if (ret) {
- watchdog_release(inode, filp);
- return ret;
- }
-
- return nonseekable_open(inode, filp);
+ kref_get(&data->kref);
}
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
{
- int ret;
- struct sch56xx_watchdog_data *data = filp->private_data;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- if (count) {
- if (!nowayout) {
- size_t i;
-
- /* Clear it in case it was set with a previous write */
- data->watchdog_expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- data->watchdog_expect_close = 1;
- }
- }
- ret = watchdog_trigger(data);
- if (ret)
- return ret;
- }
- return count;
+ kref_put(&data->kref, watchdog_release_resources);
}
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
- .identity = "sch56xx watchdog"
- };
- int i, ret = 0;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ident.firmware_version = data->revision;
- if (!nowayout)
- ident.options |= WDIOF_MAGICCLOSE;
- if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int __user *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- ret = watchdog_trigger(data);
- break;
-
- case WDIOC_GETTIMEOUT:
- i = watchdog_get_timeout(data);
- ret = put_user(i, (int __user *)arg);
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
- ret = watchdog_set_timeout(data, i);
- if (ret >= 0)
- ret = put_user(ret, (int __user *)arg);
- break;
-
- case WDIOC_SETOPTIONS:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
-
- if (i & WDIOS_DISABLECARD)
- ret = watchdog_stop(data);
- else if (i & WDIOS_ENABLECARD)
- ret = watchdog_trigger(data);
- else
- ret = -EINVAL;
- break;
-
- default:
- ret = -ENOTTY;
- }
- return ret;
-}
-
-static const struct file_operations watchdog_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = watchdog_open,
- .release = watchdog_release,
- .write = watchdog_write,
- .unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+ .owner = THIS_MODULE,
+ .start = watchdog_start,
+ .stop = watchdog_stop,
+ .ping = watchdog_trigger,
+ .set_timeout = watchdog_set_timeout,
+ .ref = watchdog_ref,
+ .unref = watchdog_unref,
};
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
{
struct sch56xx_watchdog_data *data;
- int i, err, control, output_enable;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ int err, control, output_enable;
/* Cache the watchdog registers */
mutex_lock(io_lock);
@@ -656,82 +447,55 @@
return NULL;
data->addr = addr;
- data->revision = revision;
data->io_lock = io_lock;
- data->watchdog_control = control;
- data->watchdog_output_enable = output_enable;
- mutex_init(&data->watchdog_lock);
- INIT_LIST_HEAD(&data->list);
kref_init(&data->kref);
- err = watchdog_set_timeout(data, 60);
- if (err < 0)
- goto error;
+ strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+ sizeof(data->wdinfo.identity));
+ data->wdinfo.firmware_version = revision;
+ data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+ if (!nowayout)
+ data->wdinfo.options |= WDIOF_MAGICCLOSE;
- /*
- * We take the data_mutex lock early so that watchdog_open() cannot
- * run when misc_register() has completed, but we've not yet added
- * our data to the watchdog_data_list.
- */
- mutex_lock(&watchdog_data_mutex);
- for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
- /* Register our watchdog part */
- snprintf(data->watchdog_name, sizeof(data->watchdog_name),
- "watchdog%c", (i == 0) ? '\0' : ('0' + i));
- data->watchdog_miscdev.name = data->watchdog_name;
- data->watchdog_miscdev.fops = &watchdog_fops;
- data->watchdog_miscdev.minor = watchdog_minors[i];
- err = misc_register(&data->watchdog_miscdev);
- if (err == -EBUSY)
- continue;
- if (err)
- break;
+ data->wddev.info = &data->wdinfo;
+ data->wddev.ops = &watchdog_ops;
+ data->wddev.parent = parent;
+ data->wddev.timeout = 60;
+ data->wddev.min_timeout = 1;
+ data->wddev.max_timeout = 255 * 60;
+ if (nowayout)
+ set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+ if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+ set_bit(WDOG_ACTIVE, &data->wddev.status);
- list_add(&data->list, &watchdog_data_list);
- pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
- data->watchdog_name, watchdog_minors[i]);
- break;
- }
- mutex_unlock(&watchdog_data_mutex);
+ /* Since the watchdog uses a downcounter there is no register to read
+ the BIOS-set timeout from (if any was set at all), so choose a
+ preset which will give us a 1 minute timeout */
+ if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+ data->watchdog_preset = 60; /* seconds */
+ else
+ data->watchdog_preset = 1; /* minute */
+ data->watchdog_control = control;
+ data->watchdog_output_enable = output_enable;
+
+ watchdog_set_drvdata(&data->wddev, data);
+ err = watchdog_register_device(&data->wddev);
if (err) {
pr_err("Registering watchdog chardev: %d\n", err);
- goto error;
- }
- if (i == ARRAY_SIZE(watchdog_minors)) {
- pr_warn("Couldn't register watchdog (no free minor)\n");
- goto error;
+ kfree(data);
+ return NULL;
}
return data;
-
-error:
- kfree(data);
- return NULL;
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
{
- mutex_lock(&watchdog_data_mutex);
- misc_deregister(&data->watchdog_miscdev);
- list_del(&data->list);
- mutex_unlock(&watchdog_data_mutex);
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_is_open) {
- pr_warn("platform device unregistered with watchdog "
- "open! Stopping watchdog.\n");
- watchdog_stop_unlocked(data);
- }
- /* Tell the wdog start/stop/trigger functions our dev is gone */
- data->addr = 0;
- data->io_lock = NULL;
- mutex_unlock(&data->watchdog_lock);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ watchdog_unregister_device(&data->wddev);
+ kref_put(&data->kref, watchdog_release_resources);
+ /* Don't touch data after this, it may have been freed! */
}
EXPORT_SYMBOL(sch56xx_watchdog_unregister);
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 7475086..704ea2c 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -27,6 +27,6 @@
int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
int high_nibble);
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
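
The sch56xx change above drops the hand-rolled misc-device watchdog in favour of the generic watchdog core, which provides the chardev, the magic-close handling and the WDIOC ioctls itself. For reference, a minimal (hypothetical) driver only has to fill in a watchdog_info, a watchdog_ops and a watchdog_device and register the latter; the hardware hooks below are stand-ins, not sch56xx code.

#include <linux/module.h>
#include <linux/watchdog.h>

/* Hypothetical hardware hooks, for illustration only. */
static int demo_hw_enable(int on)	{ return 0; }
static int demo_hw_kick(void)		{ return 0; }

static int demo_wdt_start(struct watchdog_device *wddev)
{
	return demo_hw_enable(1);
}

static int demo_wdt_stop(struct watchdog_device *wddev)
{
	return demo_hw_enable(0);
}

static int demo_wdt_ping(struct watchdog_device *wddev)
{
	return demo_hw_kick();
}

static const struct watchdog_info demo_wdt_info = {
	.options  = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
	.identity = "demo watchdog",
};

static const struct watchdog_ops demo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = demo_wdt_start,
	.stop  = demo_wdt_stop,
	.ping  = demo_wdt_ping,
};

static struct watchdog_device demo_wddev = {
	.info        = &demo_wdt_info,
	.ops         = &demo_wdt_ops,
	.timeout     = 60,
	.min_timeout = 1,
	.max_timeout = 255 * 60,
};

static int __init demo_wdt_init(void)
{
	return watchdog_register_device(&demo_wddev);
}

static void __exit demo_wdt_exit(void)
{
	watchdog_unregister_device(&demo_wddev);
}

module_init(demo_wdt_init);
module_exit(demo_wdt_exit);
MODULE_LICENSE("GPL");
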
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 7f0b832..fad22b0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -608,7 +608,7 @@
static u32 bit_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 03b6157..a26dfb8 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -502,7 +502,8 @@
/* declare our i2c functionality */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index fa0b134..0195915 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -626,7 +626,8 @@
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 4504832..5ec2261 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -265,19 +265,41 @@
res = 0;
for (i = 0; i < rdwr_arg.nmsgs; i++) {
- /* Limit the size of the message to a sane amount;
- * and don't let length change either. */
- if ((rdwr_pa[i].len > 8192) ||
- (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+ /* Limit the size of the message to a sane amount */
+ if (rdwr_pa[i].len > 8192) {
res = -EINVAL;
break;
}
+
data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
if (IS_ERR(rdwr_pa[i].buf)) {
res = PTR_ERR(rdwr_pa[i].buf);
break;
}
+
+ /*
+ * If the message length is received from the slave (similar
+ * to SMBus block read), we must ensure that the buffer will
+ * be large enough to cope with a message length of
+ * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+ * drivers allow. The first byte in the buffer must be
+ * pre-filled with the number of extra bytes, which must be
+ * at least one to hold the message length, but can be
+ * greater (for example to account for a checksum byte at
+ * the end of the message.)
+ */
+ if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+ if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+ rdwr_pa[i].buf[0] < 1 ||
+ rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ I2C_SMBUS_BLOCK_MAX) {
+ res = -EINVAL;
+ break;
+ }
+
+ rdwr_pa[i].len = rdwr_pa[i].buf[0];
+ }
}
if (res < 0) {
int j;
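
From user space, the I2C_M_RECV_LEN path validated above is exercised through the I2C_RDWR ioctl: the first buffer byte must be pre-set to the number of extra bytes (at least 1, for the length byte itself) and the buffer must leave room for buf[0] + I2C_SMBUS_BLOCK_MAX bytes. A sketch of such a slave-sized block read follows; the device node, slave address and command byte are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define DEMO_ADDR	0x50	/* hypothetical slave address */
#define DEMO_CMD	0x10	/* hypothetical command/register byte */

int main(void)
{
	uint8_t cmd = DEMO_CMD;
	/* Room for the length byte plus a maximum-sized SMBus block. */
	uint8_t buf[1 + I2C_SMBUS_BLOCK_MAX] = { 1 };	/* buf[0] = extra bytes */
	struct i2c_msg msgs[2];
	struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0)
		return 1;

	msgs[0].addr  = DEMO_ADDR;		/* write the command byte */
	msgs[0].flags = 0;
	msgs[0].len   = 1;
	msgs[0].buf   = &cmd;

	msgs[1].addr  = DEMO_ADDR;		/* read, length supplied by slave */
	msgs[1].flags = I2C_M_RD | I2C_M_RECV_LEN;
	msgs[1].len   = sizeof(buf);
	msgs[1].buf   = buf;

	if (ioctl(fd, I2C_RDWR, &rdwr) < 0) {
		close(fd);
		return 1;
	}

	printf("slave returned %u data bytes\n", buf[0]);
	close(fd);
	return 0;
}
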
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 3063464..57d19d4 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -231,6 +231,7 @@
}
if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&client->dev,
"need i2c bus that supports protocol mangling\n");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e..d90a421 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -450,12 +450,27 @@
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
- u32 *event = __evt;
- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+ int type, devid, domid, flags;
+ volatile u32 *event = __evt;
+ int count = 0;
+ u64 address;
+
+retry:
+ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ if (type == 0) {
+ /* Did we hit the erratum? */
+ if (++count == LOOP_TIMEOUT) {
+ pr_err("AMD-Vi: No event written to event log\n");
+ return;
+ }
+ udelay(1);
+ goto retry;
+ }
printk(KERN_ERR "AMD-Vi: Event logged [");
@@ -508,6 +523,8 @@
default:
printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
}
+
+ memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -2035,20 +2052,20 @@
}
/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 2)
+#define PCI_PRI_TLP_OFF (1 << 15)
bool pci_pri_tlp_required(struct pci_dev *pdev)
{
- u16 control;
+ u16 status;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- return (control & PCI_PRI_TLP_OFF) ? true : false;
+ return (status & PCI_PRI_TLP_OFF) ? true : false;
}
/*
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2198b2d..8b9ded8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -119,6 +119,7 @@
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
*
* This function should be used by IOMMU users which want to be notified
* whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@
* error code otherwise.
*/
void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler,
+ void *token)
{
BUG_ON(!domain);
domain->handler = handler;
+ domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
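
With the new token argument, callers of iommu_set_fault_handler() get their own context back in the fault handler instead of having to reach for globals. A minimal sketch (the handler, the context struct and the error value chosen are hypothetical) could look like:

#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/errno.h>

struct demo_ctx {
	const char *name;
};

static int demo_fault_handler(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags, void *token)
{
	struct demo_ctx *ctx = token;

	dev_err(dev, "%s: iommu fault at iova 0x%lx (flags 0x%x)\n",
		ctx->name, iova, flags);

	/* Non-zero tells the IOMMU driver the fault was not handled here. */
	return -ENOSYS;
}

static struct demo_ctx demo_token = { .name = "demo" };

/* After iommu_domain_alloc() succeeds: */
static void demo_install_fault_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, demo_fault_handler, &demo_token);
}
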
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6899dcd..e70ee2b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -41,11 +41,13 @@
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
+ * @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
+ struct device *dev;
spinlock_t lock;
};
@@ -1081,6 +1083,7 @@
}
omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->dev = dev;
oiommu->domain = domain;
out:
@@ -1088,19 +1091,16 @@
return ret;
}
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+ struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
- spin_lock(&omap_domain->lock);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
- goto out;
+ return;
}
iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->dev = NULL;
+}
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+
+ spin_lock(&omap_domain->lock);
+ _omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
@@ -1148,13 +1156,19 @@
return -ENOMEM;
}
-/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
+ /*
+ * Is an iommu device still attached?
+ * (currently, only one device can be attached)
+ */
+ if (omap_domain->iommu_dev)
+ _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 779306e..0c0a377 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -29,15 +29,17 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
+#include <linux/of.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
-#define GART_CONFIG 0x24
-#define GART_ENTRY_ADDR 0x28
-#define GART_ENTRY_DATA 0x2c
+#define GART_REG_BASE 0x24
+#define GART_CONFIG (0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
@@ -158,7 +160,7 @@
struct gart_client *client, *c;
int err = 0;
- gart = dev_get_drvdata(dev->parent);
+ gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
@@ -422,6 +424,14 @@
.resume = tegra_gart_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-gart", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
@@ -429,6 +439,7 @@
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
+ .of_match_table = of_match_ptr(tegra_gart_of_match),
},
};
@@ -448,4 +459,5 @@
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb93c82..ecd6790 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -733,7 +733,7 @@
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
- dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+ dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client:
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de7364..d1827e8 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
- goto err_out;
+ goto err_out_disable_dev;
}
pci_set_master(pdev);
@@ -1117,7 +1117,7 @@
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_disable_dev;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@
err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_dev:
free_netdev(dev);
+err_out_disable_dev:
+ pci_disable_device(pdev);
err_out:
return err;
}
@@ -1251,6 +1255,9 @@
mdiobus_unregister(lp->mii_bus);
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c221..667169b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1101,8 +1101,12 @@
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+ /* fix the values for the next receiving */
+ mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
sh_eth_write(ndev, EDRRR_R, EDRRR);
+ }
return 0;
}
@@ -1199,8 +1203,6 @@
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
- sh_eth_write(ndev, EDRRR_R, EDRRR);
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f..1466e5d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2390,11 +2390,11 @@
retval = smsc911x_request_resources(pdev);
if (retval)
- goto out_return_resources;
+ goto out_request_resources_fail;
retval = smsc911x_enable_resources(pdev);
if (retval)
- goto out_disable_resources;
+ goto out_enable_resources_fail;
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@
free_irq(dev->irq, dev);
out_disable_resources:
(void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
smsc911x_free_resources(pdev);
+out_request_resources_fail:
platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
free_netdev(dev);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 71e2b05..3ae80ec 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -35,6 +35,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@
return 0;
}
- if ((size > dev->net->mtu + ETH_HLEN) ||
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
(size + offset > skb->len)) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea..3b20678 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -547,6 +547,8 @@
{QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995..5214b1e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1231,11 +1231,6 @@
vi->config_enable = false;
mutex_unlock(&vi->config_lock);
- virtqueue_disable_cb(vi->rvq);
- virtqueue_disable_cb(vi->svq);
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
- virtqueue_disable_cb(vi->cvq);
-
netif_device_detach(vi->dev);
cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a6..fbaa309 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2415,6 +2415,22 @@
* Initialization routines *
\*************************/
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 4, .types =
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
@@ -2436,6 +2452,9 @@
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d90..dfb0441 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3809,7 +3809,7 @@
return true;
}
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
int internal_regulator =
ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac4..8396d15 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,4 +334,7 @@
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2..1bd3a3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
#define INITVALS_9330_1P1_H
static const u32 ar9331_1p1_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@
};
static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@
};
static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
{0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@
};
static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@
{0x000160b4, 0x92480040},
{0x000160c0, 0x006db6db},
{0x000160c4, 0x0186db60},
- {0x000160c8, 0x6db6db6c},
+ {0x000160c8, 0x6db4db6c},
{0x000160cc, 0x6de6c300},
{0x000160d0, 0x14500820},
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80015},
{0x00016108, 0x00080010},
{0x0001610c, 0x00170000},
- {0x00016140, 0x10804000},
+ {0x00016140, 0x10800000},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
{0x00016280, 0x01000015},
@@ -417,7 +418,7 @@
};
static const u32 ar9331_1p1_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
};
@@ -691,7 +692,7 @@
};
static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@
};
static const u32 ar9331_1p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@
static const u32 ar9331_common_rx_gain_1p1[][2] = {
/* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
+ {0x00009e18, 0x05000000},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
{0x0000a050, 0x00000000},
{0x0000a054, 0x00000000},
{0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
{0x0000a0a4, 0x00000000},
{0x0000a0a8, 0x00000000},
{0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
{0x0000a114, 0x00000000},
{0x0000a118, 0x00000000},
{0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@
{0x0000a138, 0x00000000},
{0x0000a13c, 0x00000000},
{0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@
{0x0000a1f0, 0x00000396},
{0x0000a1f4, 0x00000396},
{0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
+ {0x0000a1fc, 0x00000296},
};
static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
- {0},
- {3},
- {0},
- {0},
+ {0x00000000},
+ {0x00000003},
+ {0x00000000},
+ {0x00000000},
};
static const u32 ar9331_1p1_chansel_xtal_25M[] = {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index abe05ec..7db1890 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1468,6 +1468,9 @@
return false;
ah->chip_fullsleep = false;
+
+ if (AR_SREV_9330(ah))
+ ar9003_hw_internal_regulator_apply(ah);
ath9k_hw_init_pll(ah, chan);
ath9k_hw_set_rfmode(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8..4de4473 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -239,7 +239,7 @@
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- bool ret;
+ bool ret = true;
ieee80211_stop_queues(sc->hw);
@@ -250,11 +250,12 @@
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- ret = ath_drain_all_txq(sc, retry_tx);
-
if (!ath_stoprecv(sc))
ret = false;
+ if (!ath_drain_all_txq(sc, retry_tx))
+ ret = false;
+
if (!flush) {
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b..d59dd01 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -64,7 +64,8 @@
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool dequeue);
enum {
MCS_HT20,
@@ -811,7 +812,7 @@
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
if (!bf)
continue;
@@ -1726,7 +1727,7 @@
return;
}
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
@@ -1753,7 +1754,7 @@
bf = fi->bf;
if (!bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
if (!bf)
return;
@@ -1814,7 +1815,8 @@
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool dequeue)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1865,8 @@
return bf;
error:
+ if (dequeue)
+ __skb_unlink(skb, &tid->buf_q);
dev_kfree_skb_any(skb);
return NULL;
}
@@ -1893,7 +1897,7 @@
*/
ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ff..a299d42 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1239,7 +1240,7 @@
return -EINVAL;
}
- devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+ devinfo->image = vmalloc(fw->size); /* plus nvram */
if (!devinfo->image)
return -ENOMEM;
@@ -1603,7 +1604,7 @@
void brcmf_usb_exit(void)
{
usb_deregister(&brcmf_usbdrvr);
- kfree(g_image.data);
+ vfree(g_image.data);
g_image.data = NULL;
g_image.len = 0;
}
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e5..2463c06 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -137,11 +137,3 @@
even if the microcode doesn't advertise it.
Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
- bool "support uCode 16.0"
- depends on IWLWIFI
- help
- This option enables support for uCode version 16.0.
-
- Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297..d615eac 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -18,7 +18,6 @@
iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7f79341..8133105 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -79,7 +79,7 @@
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.hd_v2 = true,
};
@@ -97,7 +97,7 @@
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.hd_v2 = true,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 381b02c..19f7ee8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -86,7 +86,7 @@
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +102,7 @@
.chain_noise_scale = 1500,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +118,7 @@
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 51e1a69..8cebd7c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -884,6 +884,7 @@
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
+ priv->last_bt_traffic_load = priv->bt_traffic_load;
/* Update uCode's rate table. */
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index b31584e..aea07aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -772,7 +772,7 @@
~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &=
~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_bh(&priv->sta_lock);
+ continue;
}
/*
* Rate scaling has already been initialized, send
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad..d742900 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -657,17 +657,17 @@
return -EINVAL;
}
-static int alloc_pci_desc(struct iwl_drv *drv,
- struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
{
int i;
for (i = 0;
i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
i++)
if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
- get_sec(pieces, type, i)))
- return -1;
+ get_sec(pieces, type, i)))
+ return -ENOMEM;
return 0;
}
@@ -825,8 +825,8 @@
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
- if (alloc_pci_desc(drv, &pieces, i))
- goto err_pci_alloc;
+ if (iwl_alloc_ucode(drv, &pieces, i))
+ goto out_free_fw;
/* Now that we can no longer fail, copy information */
@@ -866,7 +866,7 @@
drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
if (!drv->op_mode)
- goto out_unbind;
+ goto out_free_fw;
return;
@@ -877,7 +877,7 @@
goto out_unbind;
return;
- err_pci_alloc:
+ out_free_fw:
IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
- struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
- GFP_KERNEL);
-
- if (!phy_db)
- return phy_db;
-
- phy_db->dev = dev;
-
- /* TODO: add default values of the phy db. */
- return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
- return NULL;
-
- switch (type) {
- case IWL_PHY_DB_CFG:
- return &phy_db->cfg;
- case IWL_PHY_DB_CALIB_NCH:
- return &phy_db->calib_nch;
- case IWL_PHY_DB_CALIB_CH:
- return &phy_db->calib_ch;
- case IWL_PHY_DB_CALIB_CHG_PAPD:
- if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_papd[chg_id];
- case IWL_PHY_DB_CALIB_CHG_TXP:
- if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_txp[chg_id];
- default:
- return NULL;
- }
- return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- struct iwl_phy_db_entry *entry =
- iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return;
-
- kfree(entry->data);
- entry->data = NULL;
- entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
- int i;
-
- if (!phy_db)
- return;
-
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
- for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
- kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx)
-{
- struct iwl_phy_db_entry *entry;
- u16 chg_id = 0;
-
- if (!phy_db)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
- type == IWL_PHY_DB_CALIB_CHG_TXP)
- chg_id = le16_to_cpup((__le16 *)data);
-
- entry = iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return -EINVAL;
-
- kfree(entry->data);
- entry->data = kmemdup(data, size, alloc_ctx);
- if (!entry->data) {
- entry->size = 0;
- return -ENOMEM;
- }
-
- entry->size = size;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- phy_db->channel_num = le32_to_cpup((__le32 *)data);
- phy_db->channel_size =
- (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
- }
-
- return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
- if (ch_id <= 14 ||
- (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
- (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
- (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
- return 1;
- return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (ch_id <= 14)
- return ch_id - 1;
- if (ch_id <= 64)
- return (ch_id + 20) / 4;
- if (ch_id <= 140)
- return (ch_id - 12) / 4;
- return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (1 <= ch_id && ch_id <= 14)
- return 0;
- if (36 <= ch_id && ch_id <= 64)
- return 1;
- if (100 <= ch_id && ch_id <= 140)
- return 2;
- return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
- struct iwl_phy_db_chg_txp *txp_chg;
- int i;
- u8 ch_index = ch_id_to_ch_index(ch_id);
- if (ch_index == 0xff)
- return 0xff;
-
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
- txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
- if (!txp_chg)
- return 0xff;
- /*
- * Looking for the first channel group that its max channel is
- * higher then wanted channel.
- */
- if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
- return i;
- }
- return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id)
-{
- struct iwl_phy_db_entry *entry;
- u32 channel_num;
- u32 channel_size;
- u16 ch_group_id = 0;
- u16 index;
-
- if (!phy_db)
- return -EINVAL;
-
- /* find wanted channel group */
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
- ch_group_id = channel_id_to_papd(ch_id);
- else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
- ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
- entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
- if (!entry)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- index = ch_id_to_ch_index(ch_id);
- channel_num = phy_db->channel_num;
- channel_size = phy_db->channel_size;
- if (index >= channel_num) {
- IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
- return -EINVAL;
- }
- *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
- *size = channel_size;
- } else {
- *data = entry->data;
- *size = entry->size;
- }
- return 0;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644
index c34c6a9..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS 4
-#define IWL_NUM_TXP_CH_GROUPS 8
-
-struct iwl_phy_db_entry {
- u16 size;
- u8 *data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power chanel group.
- */
-struct iwl_phy_db {
- struct iwl_phy_db_entry cfg;
- struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch;
- struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
- struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
- u32 channel_num;
- u32 channel_size;
-
- /* for an access to the logger */
- struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
- IWL_PHY_DB_CFG = 1,
- IWL_PHY_DB_CALIB_NCH,
- IWL_PHY_DB_CALIB_CH,
- IWL_PHY_DB_CALIB_CHG_PAPD,
- IWL_PHY_DB_CALIB_CHG_TXP,
- IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware*/
-struct iwl_phy_db_chg_txp {
- __le32 space;
- __le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx);
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id);
-
-#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 6213c05..e959207 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -347,7 +347,7 @@
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir);
+ enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 21a8a67..a875023 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -204,33 +204,39 @@
for (i = 1; i < num_tbs; i++)
dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+ tfd->num_tbs = 0;
}
/**
* iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
*
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir)
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ int rd_ptr = txq->q.read_ptr;
+ int idx = get_cmd_index(&txq->q, rd_ptr);
+
lockdep_assert_held(&txq->lock);
- iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
- &tfd_tmp[index], dma_dir);
+ /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
+ &tfd_tmp[rd_ptr], dma_dir);
/* free SKB */
if (txq->entries) {
struct sk_buff *skb;
- skb = txq->entries[index].skb;
+ skb = txq->entries[idx].skb;
/* Can be called from irqs-disabled context
* If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@
*/
if (skb) {
iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[index].skb = NULL;
+ txq->entries[idx].skb = NULL;
}
}
}
@@ -973,7 +979,7 @@
iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
- iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+ iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
freed++;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 2e57161..ec6fb39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -435,9 +435,7 @@
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
- /* The read_ptr needs to bound by q->n_window */
- iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
- dma_dir);
+ iwlagn_txq_free_tfd(trans, txq, dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
spin_unlock_bh(&txq->lock);
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f6..e2750a1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@
}
if (wl->irq) {
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@
}
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c35..87f6305 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -281,6 +281,7 @@
wl->use_eeprom = pdata->use_eeprom;
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +290,6 @@
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
-
ret = wl1251_init_ieee80211(wl);
if (ret)
goto out_irq;
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 509aa88..f3d6fa5 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1715,6 +1715,7 @@
}
+#ifdef CONFIG_PM
/* Set the global behaviour of RX filters - On/Off + default action */
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action)
@@ -1794,3 +1795,4 @@
kfree(acx);
return ret;
}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 8106b2e..e6a7486 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1330,9 +1330,11 @@
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
+
+#ifdef CONFIG_PM
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action);
int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
struct wl12xx_rx_filter *filter);
-
+#endif /* CONFIG_PM */
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 1f1d948..d6a3c6b 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -279,6 +279,7 @@
wl12xx_rearm_rx_streaming(wl, active_hlids);
}
+#ifdef CONFIG_PM
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter)
@@ -314,3 +315,4 @@
wl1271_rx_filter_enable(wl, i, 0, NULL);
}
}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401..f4a6fca 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@
unsigned int count;
int i, copy_off;
- count = DIV_ROUND_UP(
- offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+ count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
copy_off = skb_headlen(skb) % PAGE_SIZE;
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 46f4a9f..281f18c 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -232,7 +232,7 @@
static int check_crc(u8 *buf, int buflen)
{
- u8 len;
+ int len;
u16 crc;
len = buf[0] + 1;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index d6f8ada..8ea7bcc 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -78,7 +78,7 @@
* the recovery of the remote processor.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
- unsigned long iova, int flags)
+ unsigned long iova, int flags, void *token)
{
dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
@@ -117,7 +117,7 @@
return -ENOMEM;
}
- iommu_set_fault_handler(domain, rproc_iommu_fault);
+ iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
ret = iommu_attach_device(domain, dev);
if (ret) {
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 31b8f67..217678e 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1243,6 +1243,7 @@
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING))
goto ERROR0;
if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d92d748..fe819b7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -64,6 +64,18 @@
To compile this driver as a module, choose M here: the
module will be called softdog.
+config DA9052_WATCHDOG
+ tristate "Dialog DA9052 Watchdog"
+ depends on PMIC_DA9052
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog in the DA9052 PMIC. A watchdog trigger
+	  causes a system reset.
+
+ Say Y here to include support for the DA9052 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called da9052_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -87,6 +99,7 @@
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
depends on ARM_AMBA
+ select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
the timeout is reached.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 442bfbe..572b39b 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -163,6 +163,7 @@
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644
index 0000000..3f75129
--- /dev/null
+++ b/drivers/watchdog/da9052_wdt.c
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define DA9052_DEF_TIMEOUT 4
+#define DA9052_TWDMIN 256
+
+struct da9052_wdt_data {
+ struct watchdog_device wdt;
+ struct da9052 *da9052;
+ struct kref kref;
+ unsigned long jpast;
+};
+
+static const struct {
+ u8 reg_val;
+ int time; /* Seconds */
+} da9052_wdt_maps[] = {
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 8 },
+ { 4, 16 },
+ { 5, 32 },
+	{ 5, 33 },	/* Actual time 32.768s so include both 32s and 33s */
+ { 6, 65 },
+	{ 6, 66 },	/* Actual time 65.536s so include both 65s and 66s */
+ { 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+ struct da9052_wdt_data *driver_data =
+ container_of(r, struct da9052_wdt_data, kref);
+
+ kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ int ret, i;
+
+ /*
+ * Disable the Watchdog timer before setting
+ * new time out.
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+ ret);
+ return ret;
+ }
+ if (timeout) {
+ /*
+ * To change the timeout, da9052 needs to
+ * be disabled for at least 150 us.
+ */
+ udelay(150);
+
+ /* Set the desired timeout */
+ for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+ if (da9052_wdt_maps[i].time == timeout)
+ break;
+
+ if (i == ARRAY_SIZE(da9052_wdt_maps))
+ ret = -EINVAL;
+ else
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE,
+ da9052_wdt_maps[i].reg_val);
+ if (ret < 0) {
+ dev_err(da9052->dev,
+ "Failed to update timescale bit, %d\n", ret);
+ return ret;
+ }
+
+ wdt_dev->timeout = timeout;
+ driver_data->jpast = jiffies;
+ }
+
+ return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ unsigned long msec, jnow = jiffies;
+ int ret;
+
+ /*
+ * We have a minimum time for watchdog window called TWDMIN. A write
+ * to the watchdog before this elapsed time should cause an error.
+ */
+ msec = (jnow - driver_data->jpast) * 1000/HZ;
+ if (msec < DA9052_TWDMIN)
+ mdelay(msec);
+
+ /* Reset the watchdog timer */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 1 << 7);
+ if (ret < 0)
+ goto err_strobe;
+
+ /*
+ * FIXME: Reset the watchdog core, in general PMIC
+ * is supposed to do this
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+ return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = da9052_wdt_start,
+ .stop = da9052_wdt_stop,
+ .ping = da9052_wdt_ping,
+ .set_timeout = da9052_wdt_set_timeout,
+ .ref = da9052_wdt_ref,
+ .unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+ struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+ struct da9052_wdt_data *driver_data;
+ struct watchdog_device *da9052_wdt;
+ int ret;
+
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
+ if (!driver_data) {
+		dev_err(da9052->dev, "Unable to allocate watchdog device\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ driver_data->da9052 = da9052;
+
+ da9052_wdt = &driver_data->wdt;
+
+ da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->info = &da9052_wdt_info;
+ da9052_wdt->ops = &da9052_wdt_ops;
+ watchdog_set_drvdata(da9052_wdt, driver_data);
+
+ kref_init(&driver_data->kref);
+
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = watchdog_register_device(&driver_data->wdt);
+ if (ret != 0) {
+ dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, driver_data);
+err:
+ return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+ struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+ return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+ .probe = da9052_wdt_probe,
+ .remove = __devexit_p(da9052_wdt_remove),
+ .driver = {
+ .name = "da9052-watchdog",
+ },
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 SM Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 741528b..bc47e90 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -575,7 +575,7 @@
if (!request_region(iTCO_wdt_private.smi_res->start,
resource_size(iTCO_wdt_private.smi_res), dev->name)) {
pr_err("I/O address 0x%04llx already in use, device disabled\n",
- SMI_EN);
+ (u64)SMI_EN);
ret = -EBUSY;
goto unmap_gcs;
}
@@ -592,13 +592,13 @@
if (!request_region(iTCO_wdt_private.tco_res->start,
resource_size(iTCO_wdt_private.tco_res), dev->name)) {
pr_err("I/O address 0x%04llx already in use, device disabled\n",
- TCOBASE);
+ (u64)TCOBASE);
ret = -EBUSY;
goto unreg_smi;
}
pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
- ich_info->name, ich_info->iTCO_version, TCOBASE);
+ ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
/* Clear out the (probably old) status */
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bbb170e..afcd136 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -16,20 +16,17 @@
#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/math64.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
/* default timeout in seconds */
@@ -56,6 +53,7 @@
/**
* struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
* @lock: spin lock protecting dev structure and io access
* @base: base address of wdt
* @clk: clock structure of wdt
@@ -65,24 +63,24 @@
* @timeout: current programmed timeout
*/
struct sp805_wdt {
+ struct watchdog_device wdd;
spinlock_t lock;
void __iomem *base;
struct clk *clk;
struct amba_device *adev;
- unsigned long status;
- #define WDT_BUSY 0
- #define WDT_CAN_BE_CLOSED 1
unsigned int load_val;
unsigned int timeout;
};
-/* local variables */
-static struct sp805_wdt *wdt;
static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Set to 1 to keep watchdog running after device release");
/* This routine finds the load value that will reset the system in the required timeout */
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@
/* roundup timeout to closest positive integer value */
wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
spin_unlock(&wdt->lock);
+
+ return 0;
}
/* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -123,25 +124,62 @@
return div_u64(load, rate);
}
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+ if (!ping) {
+ ret = clk_prepare(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock prepare fail");
+ return ret;
+ }
+
+ ret = clk_enable(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock enable fail");
+ clk_unprepare(wdt->clk);
+ return ret;
+ }
+ }
+
spin_lock(&wdt->lock);
writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
- writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
- writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
+
+ if (!ping) {
+ writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+ writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+ WDTCONTROL);
+ }
+
writel_relaxed(LOCK, wdt->base + WDTLOCK);
/* Flush posted writes. */
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
+
+ return 0;
+}
+
+static int wdt_ping(struct watchdog_device *wdd)
+{
+ return wdt_config(wdd, true);
+}
+
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
+{
+ return wdt_config(wdd, false);
}
/* disables watchdog timers reset */
-static void wdt_disable(void)
+static int wdt_disable(struct watchdog_device *wdd)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+
spin_lock(&wdt->lock);
writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
@@ -151,138 +189,31 @@
/* Flush posted writes. */
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
-}
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- /* Check for Magic Close character */
- if (c == 'V') {
- set_bit(WDT_CAN_BE_CLOSED,
- &wdt->status);
- break;
- }
- }
- }
- wdt_enable();
- }
- return len;
-}
-
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOTTY;
- unsigned int timeout;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(timeout, (unsigned int *)arg);
- if (ret)
- break;
-
- wdt_setload(timeout);
-
- wdt_enable();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(wdt->timeout, (unsigned int *)arg);
- break;
- case WDIOC_GETTIMELEFT:
- ret = put_user(wdt_timeleft(), (unsigned int *)arg);
- break;
- }
- return ret;
-}
-
-static int sp805_wdt_open(struct inode *inode, struct file *file)
-{
- int ret = 0;
-
- if (test_and_set_bit(WDT_BUSY, &wdt->status))
- return -EBUSY;
-
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(&wdt->adev->dev, "clock enable fail");
- goto err;
- }
-
- wdt_enable();
-
- /* can not be closed, once enabled */
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
- return nonseekable_open(inode, file);
-
-err:
- clear_bit(WDT_BUSY, &wdt->status);
- return ret;
-}
-
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
- if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
- clear_bit(WDT_BUSY, &wdt->status);
- dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
- return 0;
- }
-
- wdt_disable();
clk_disable(wdt->clk);
- clear_bit(WDT_BUSY, &wdt->status);
+ clk_unprepare(wdt->clk);
return 0;
}
-static const struct file_operations sp805_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = sp805_wdt_write,
- .unlocked_ioctl = sp805_wdt_ioctl,
- .open = sp805_wdt_open,
- .release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = MODULE_NAME,
};
-static struct miscdevice sp805_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_enable,
+ .stop = wdt_disable,
+ .ping = wdt_ping,
+ .set_timeout = wdt_setload,
+ .get_timeleft = wdt_timeleft,
};
static int __devinit
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
+ struct sp805_wdt *wdt;
int ret = 0;
if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@
}
wdt->adev = adev;
- spin_lock_init(&wdt->lock);
- wdt_setload(DEFAULT_TIMEOUT);
+ wdt->wdd.info = &wdt_info;
+ wdt->wdd.ops = &wdt_ops;
- ret = misc_register(&sp805_wdt_miscdev);
- if (ret < 0) {
- dev_warn(&adev->dev, "cannot register misc device\n");
- goto err_misc_register;
+ spin_lock_init(&wdt->lock);
+ watchdog_set_nowayout(&wdt->wdd, nowayout);
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
+
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err_register;
}
+ amba_set_drvdata(adev, wdt);
dev_info(&adev->dev, "registration successful\n");
return 0;
-err_misc_register:
+err_register:
clk_put(wdt->clk);
err:
dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@
static int __devexit sp805_wdt_remove(struct amba_device *adev)
{
- misc_deregister(&sp805_wdt_miscdev);
+ struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+ watchdog_unregister_device(&wdt->wdd);
+ amba_set_drvdata(adev, NULL);
+ watchdog_set_drvdata(&wdt->wdd, NULL);
clk_put(wdt->clk);
return 0;
@@ -345,28 +287,22 @@
#ifdef CONFIG_PM
static int sp805_wdt_suspend(struct device *dev)
{
- if (test_bit(WDT_BUSY, &wdt->status)) {
- wdt_disable();
- clk_disable(wdt->clk);
- }
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ return wdt_disable(&wdt->wdd);
return 0;
}
static int sp805_wdt_resume(struct device *dev)
{
- int ret = 0;
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
- if (test_bit(WDT_BUSY, &wdt->status)) {
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "clock enable fail");
- return ret;
- }
- wdt_enable();
- }
+ if (watchdog_active(&wdt->wdd))
+ return wdt_enable(&wdt->wdd);
- return ret;
+ return 0;
}
#endif /* CONFIG_PM */
@@ -395,11 +331,6 @@
module_amba_driver(sp805_wdt_driver);
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Set to 1 to keep watchdog running after device release");
-
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 5603e31..aa50da3 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -91,7 +91,7 @@
static void wdt_timer_tick(unsigned long data)
{
if (time_before(jiffies, next_heartbeat) ||
- (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ (!watchdog_active(&wdt_dev))) {
wdt_reset();
mod_timer(&timer, jiffies + WDT_HEARTBEAT);
} else
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 14d768b..6aa46a9 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -34,8 +34,13 @@
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/watchdog.h> /* For watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/idr.h> /* For ida_* macros */
+#include <linux/err.h> /* For IS_ERR macros */
-#include "watchdog_dev.h" /* For watchdog_dev_register/... */
+#include "watchdog_core.h" /* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
/**
* watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
*/
int watchdog_register_device(struct watchdog_device *wdd)
{
- int ret;
+ int ret, id, devno;
if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
return -EINVAL;
@@ -74,10 +79,38 @@
* corrupted in a later stage then we expect a kernel panic!
*/
- /* We only support 1 watchdog device via the /dev/watchdog interface */
+ mutex_init(&wdd->lock);
+ id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
ret = watchdog_dev_register(wdd);
if (ret) {
- pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+ ida_simple_remove(&watchdog_ida, id);
+ if (!(id == 0 && ret == -EBUSY))
+ return ret;
+
+ /* Retry in case a legacy watchdog module exists */
+ id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
+ ret = watchdog_dev_register(wdd);
+ if (ret) {
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
+ }
+
+ devno = wdd->cdev.dev;
+ wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+ NULL, "watchdog%d", wdd->id);
+ if (IS_ERR(wdd->dev)) {
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ ret = PTR_ERR(wdd->dev);
return ret;
}
@@ -95,6 +128,7 @@
void watchdog_unregister_device(struct watchdog_device *wdd)
{
int ret;
+ int devno = wdd->cdev.dev;
if (wdd == NULL)
return;
@@ -102,9 +136,41 @@
ret = watchdog_dev_unregister(wdd);
if (ret)
pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+ device_destroy(watchdog_class, devno);
+ ida_simple_remove(&watchdog_ida, wdd->id);
+ wdd->dev = NULL;
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+static int __init watchdog_init(void)
+{
+ int err;
+
+ watchdog_class = class_create(THIS_MODULE, "watchdog");
+ if (IS_ERR(watchdog_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(watchdog_class);
+ }
+
+ err = watchdog_dev_init();
+ if (err < 0) {
+ class_destroy(watchdog_class);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ watchdog_dev_exit();
+ class_destroy(watchdog_class);
+ ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_core.h
similarity index 79%
rename from drivers/watchdog/watchdog_dev.h
rename to drivers/watchdog/watchdog_core.h
index bc7612b..6c95141 100644
--- a/drivers/watchdog/watchdog_dev.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -26,8 +26,12 @@
* This material is provided "AS-IS" and at no charge.
*/
+#define MAX_DOGS 32 /* Maximum number of watchdog devices */
+
/*
* Functions/procedures to be called by the core
*/
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8558da9..672d169 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,10 +42,12 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
/* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
/*
* watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@
static int watchdog_ping(struct watchdog_device *wddev)
{
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- if (wddev->ops->ping)
- return wddev->ops->ping(wddev); /* ping the watchdog */
- else
- return wddev->ops->start(wddev); /* restart watchdog */
+ int err = 0;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ping;
}
- return 0;
+
+ if (!watchdog_active(wddev))
+ goto out_ping;
+
+ if (wddev->ops->ping)
+ err = wddev->ops->ping(wddev); /* ping the watchdog */
+ else
+ err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -79,16 +94,25 @@
static int watchdog_start(struct watchdog_device *wddev)
{
- int err;
+ int err = 0;
- if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->start(wddev);
- if (err < 0)
- return err;
+ mutex_lock(&wddev->lock);
- set_bit(WDOG_ACTIVE, &wddev->status);
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_start;
}
- return 0;
+
+ if (watchdog_active(wddev))
+ goto out_start;
+
+ err = wddev->ops->start(wddev);
+ if (err == 0)
+ set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -103,22 +127,155 @@
static int watchdog_stop(struct watchdog_device *wddev)
{
- int err = -EBUSY;
+ int err = 0;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_stop;
+ }
+
+ if (!watchdog_active(wddev))
+ goto out_stop;
if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
- pr_info("%s: nowayout prevents watchdog to be stopped!\n",
- wddev->info->identity);
- return err;
+		dev_info(wddev->dev, "nowayout prevents the watchdog from being stopped!\n");
+ err = -EBUSY;
+ goto out_stop;
}
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->stop(wddev);
- if (err < 0)
- return err;
-
+ err = wddev->ops->stop(wddev);
+ if (err == 0)
clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_status: wrapper to get the watchdog status
+ * @wddev: the watchdog device to get the status from
+ * @status: the status of the watchdog device
+ *
+ * Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+ unsigned int *status)
+{
+ int err = 0;
+
+ *status = 0;
+ if (!wddev->ops->status)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_status;
}
- return 0;
+
+ *status = wddev->ops->status(wddev);
+
+out_status:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_set_timeout: set the watchdog timer timeout
+ * @wddev: the watchdog device to set the timeout for
+ * @timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
+{
+ int err;
+
+ if ((wddev->ops->set_timeout == NULL) ||
+ !(wddev->info->options & WDIOF_SETTIMEOUT))
+ return -EOPNOTSUPP;
+
+ if ((wddev->max_timeout != 0) &&
+ (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+ return -EINVAL;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeout;
+ }
+
+ err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_timeleft: wrapper to get the time left before a reboot
+ * @wddev: the watchdog device to get the remaining time from
+ * @timeleft: the time that's left
+ *
+ * Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+ unsigned int *timeleft)
+{
+ int err = 0;
+
+ *timeleft = 0;
+ if (!wddev->ops->get_timeleft)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeleft;
+ }
+
+ *timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
+ * @wddev: the watchdog device to do the ioctl on
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ if (!wddev->ops->ioctl)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ioctl;
+ }
+
+ err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -136,6 +293,7 @@
static ssize_t watchdog_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ struct watchdog_device *wdd = file->private_data;
size_t i;
char c;
@@ -175,23 +333,24 @@
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct watchdog_device *wdd = file->private_data;
void __user *argp = (void __user *)arg;
int __user *p = argp;
unsigned int val;
int err;
- if (wdd->ops->ioctl) {
- err = wdd->ops->ioctl(wdd, cmd, arg);
- if (err != -ENOIOCTLCMD)
- return err;
- }
+ err = watchdog_ioctl_op(wdd, cmd, arg);
+ if (err != -ENOIOCTLCMD)
+ return err;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, wdd->info,
sizeof(struct watchdog_info)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
- val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+ err = watchdog_get_status(wdd, &val);
+ if (err)
+ return err;
return put_user(val, p);
case WDIOC_GETBOOTSTATUS:
return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@
watchdog_ping(wdd);
return 0;
case WDIOC_SETTIMEOUT:
- if ((wdd->ops->set_timeout == NULL) ||
- !(wdd->info->options & WDIOF_SETTIMEOUT))
- return -EOPNOTSUPP;
if (get_user(val, p))
return -EFAULT;
- if ((wdd->max_timeout != 0) &&
- (val < wdd->min_timeout || val > wdd->max_timeout))
- return -EINVAL;
- err = wdd->ops->set_timeout(wdd, val);
+ err = watchdog_set_timeout(wdd, val);
if (err < 0)
return err;
/* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@
return -EOPNOTSUPP;
return put_user(wdd->timeout, p);
case WDIOC_GETTIMELEFT:
- if (!wdd->ops->get_timeleft)
- return -EOPNOTSUPP;
-
- return put_user(wdd->ops->get_timeleft(wdd), p);
+ err = watchdog_get_timeleft(wdd, &val);
+ if (err)
+ return err;
+ return put_user(val, p);
default:
return -ENOTTY;
}
}
/*
- * watchdog_open: open the /dev/watchdog device.
+ * watchdog_open: open the /dev/watchdog* devices.
* @inode: inode of device
* @file: file handle to device
*
- * When the /dev/watchdog device gets opened, we start the watchdog.
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
* Watch out: the /dev/watchdog device is single open, so we make sure
* it can only be opened once.
*/
@@ -259,6 +412,13 @@
static int watchdog_open(struct inode *inode, struct file *file)
{
int err = -EBUSY;
+ struct watchdog_device *wdd;
+
+ /* Get the corresponding watchdog device */
+ if (imajor(inode) == MISC_MAJOR)
+ wdd = old_wdd;
+ else
+ wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
/* the watchdog is single open! */
if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@
if (err < 0)
goto out_mod;
+ file->private_data = wdd;
+
+ if (wdd->ops->ref)
+ wdd->ops->ref(wdd);
+
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
return nonseekable_open(inode, file);
@@ -286,9 +451,9 @@
}
/*
- * watchdog_release: release the /dev/watchdog device.
- * @inode: inode of device
- * @file: file handle to device
+ * watchdog_release: release the watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
*
* This is the code for when /dev/watchdog gets closed. We will only
* stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@
static int watchdog_release(struct inode *inode, struct file *file)
{
+ struct watchdog_device *wdd = file->private_data;
int err = -EBUSY;
/*
@@ -310,7 +476,10 @@
/* If the watchdog was not stopped, send a keepalive ping */
if (err < 0) {
- pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+ mutex_lock(&wdd->lock);
+ if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+ dev_crit(wdd->dev, "watchdog did not stop!\n");
+ mutex_unlock(&wdd->lock);
watchdog_ping(wdd);
}
@@ -320,6 +489,10 @@
/* make sure that /dev/watchdog can be re-opened */
clear_bit(WDOG_DEV_OPEN, &wdd->status);
+ /* Note wdd may be gone after this, do not use after this! */
+ if (wdd->ops->unref)
+ wdd->ops->unref(wdd);
+
return 0;
}
@@ -338,62 +511,92 @@
};
/*
- * watchdog_dev_register:
+ * watchdog_dev_register: register a watchdog device
* @watchdog: watchdog device
*
- * Register a watchdog device as /dev/watchdog. /dev/watchdog
- * is actually a miscdevice and thus we set it up like that.
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
*/
int watchdog_dev_register(struct watchdog_device *watchdog)
{
- int err;
+ int err, devno;
- /* Only one device can register for /dev/watchdog */
- if (test_and_set_bit(0, &watchdog_dev_busy)) {
- pr_err("only one watchdog can use /dev/watchdog\n");
- return -EBUSY;
+ if (watchdog->id == 0) {
+ watchdog_miscdev.parent = watchdog->parent;
+ err = misc_register(&watchdog_miscdev);
+ if (err != 0) {
+ pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+ watchdog->info->identity, WATCHDOG_MINOR, err);
+ if (err == -EBUSY)
+ pr_err("%s: a legacy watchdog module is probably present.\n",
+ watchdog->info->identity);
+ return err;
+ }
+ old_wdd = watchdog;
}
- wdd = watchdog;
+ /* Fill in the data structures */
+ devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+ cdev_init(&watchdog->cdev, &watchdog_fops);
+ watchdog->cdev.owner = watchdog->ops->owner;
- err = misc_register(&watchdog_miscdev);
- if (err != 0) {
- pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
- watchdog->info->identity, WATCHDOG_MINOR, err);
- goto out;
+ /* Add the device */
+ err = cdev_add(&watchdog->cdev, devno, 1);
+ if (err) {
+ pr_err("watchdog%d unable to add device %d:%d\n",
+ watchdog->id, MAJOR(watchdog_devt), watchdog->id);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
+ }
}
-
- return 0;
-
-out:
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return err;
}
/*
- * watchdog_dev_unregister:
+ * watchdog_dev_unregister: unregister a watchdog device
* @watchdog: watchdog device
*
- * Deregister the /dev/watchdog device.
+ * Unregister the watchdog and if needed the legacy /dev/watchdog device.
*/
int watchdog_dev_unregister(struct watchdog_device *watchdog)
{
- /* Check that a watchdog device was registered in the past */
- if (!test_bit(0, &watchdog_dev_busy) || !wdd)
- return -ENODEV;
+ mutex_lock(&watchdog->lock);
+ set_bit(WDOG_UNREGISTERED, &watchdog->status);
+ mutex_unlock(&watchdog->lock);
- /* We can only unregister the watchdog device that was registered */
- if (watchdog != wdd) {
- pr_err("%s: watchdog was not registered as /dev/watchdog\n",
- watchdog->info->identity);
- return -ENODEV;
+ cdev_del(&watchdog->cdev);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
}
-
- misc_deregister(&watchdog_miscdev);
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return 0;
}
+
+/*
+ * watchdog_dev_init: init dev part of watchdog core
+ *
+ * Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+ int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+ if (err < 0)
+ pr_err("watchdog: unable to allocate char dev region\n");
+ return err;
+}
+
+/*
+ * watchdog_dev_exit: exit dev part of watchdog core
+ *
+ * Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+ unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
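
Not part of this series, just a sketch: from userspace the change is only the extra per-device nodes, assuming the first registered watchdog shows up as /dev/watchdog0 next to the legacy /dev/watchdog.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
        /* the legacy node and the new per-device node behave the same */
        int fd = open("/dev/watchdog0", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        ioctl(fd, WDIOC_KEEPALIVE, 0);   /* ping */
        write(fd, "V", 1);               /* magic close char */
        close(fd);                       /* stops the dog unless nowayout */
        return 0;
    }
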
diff --git a/fs/bio.c b/fs/bio.c
index 84da885..73922ab 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -19,12 +19,14 @@
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+#include <linux/cgroup.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#include <trace/events/block.h>
@@ -418,6 +420,7 @@
* last put frees it
*/
if (atomic_dec_and_test(&bio->bi_cnt)) {
+ bio_disassociate_task(bio);
bio->bi_next = NULL;
bio->bi_destructor(bio);
}
@@ -1646,6 +1649,64 @@
}
EXPORT_SYMBOL(bioset_create);
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_current - associate a bio with %current
+ * @bio: target bio
+ *
+ * Associate @bio with %current if it hasn't been associated yet. Block
+ * layer will treat @bio as if it were issued by %current no matter which
+ * task actually issues it.
+ *
+ * This function takes an extra reference of @task's io_context and blkcg
+ * which will be put when @bio is released. The caller must own @bio,
+ * ensure %current->io_context exists, and is responsible for synchronizing
+ * calls to this function.
+ */
+int bio_associate_current(struct bio *bio)
+{
+ struct io_context *ioc;
+ struct cgroup_subsys_state *css;
+
+ if (bio->bi_ioc)
+ return -EBUSY;
+
+ ioc = current->io_context;
+ if (!ioc)
+ return -ENOENT;
+
+ /* acquire active ref on @ioc and associate */
+ get_io_context_active(ioc);
+ bio->bi_ioc = ioc;
+
+ /* associate blkcg if exists */
+ rcu_read_lock();
+ css = task_subsys_state(current, blkio_subsys_id);
+ if (css && css_tryget(css))
+ bio->bi_css = css;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/**
+ * bio_disassociate_task - undo bio_associate_current()
+ * @bio: target bio
+ */
+void bio_disassociate_task(struct bio *bio)
+{
+ if (bio->bi_ioc) {
+ put_io_context(bio->bi_ioc);
+ bio->bi_ioc = NULL;
+ }
+ if (bio->bi_css) {
+ css_put(bio->bi_css);
+ bio->bi_css = NULL;
+ }
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
static void __init biovec_init_slabs(void)
{
int i;
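
A hedged sketch (not code from this series) of how a submitter could use the new helper so block-layer accounting follows the issuing task; foo_submit_for_current() and the GFP choice are assumptions.

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/iocontext.h>
    #include <linux/sched.h>

    static void foo_submit_for_current(struct bio *bio)
    {
        struct io_context *ioc;

        /* bio_associate_current() requires %current->io_context to exist */
        ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
        if (ioc) {
            bio_associate_current(bio);  /* bio now pins the ioc and blkcg css */
            put_io_context(ioc);         /* drop the lookup reference */
        }
        submit_bio(WRITE, bio);          /* refs are dropped again in bio_put() */
    }
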
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ed72428..988d4f3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -54,7 +54,6 @@
req->r_fmode = ceph_flags_to_mode(flags);
req->r_args.open.flags = cpu_to_le32(flags);
req->r_args.open.mode = cpu_to_le32(create_mode);
- req->r_args.open.preferred = cpu_to_le32(-1);
out:
return req;
}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 790914a59..8e3fb69 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -26,8 +26,7 @@
l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
l.object_size = ceph_file_layout_object_size(ci->i_layout);
l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
- l.preferred_osd =
- (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
+ l.preferred_osd = (s32)-1;
if (copy_to_user(arg, &l, sizeof(l)))
return -EFAULT;
}
@@ -35,6 +34,32 @@
return err;
}
+static long __validate_layout(struct ceph_mds_client *mdsc,
+ struct ceph_ioctl_layout *l)
+{
+ int i, err;
+
+ /* validate striping parameters */
+ if ((l->object_size & ~PAGE_MASK) ||
+ (l->stripe_unit & ~PAGE_MASK) ||
+ ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+ return -EINVAL;
+
+ /* make sure it's a valid data pool */
+ mutex_lock(&mdsc->mutex);
+ err = -EINVAL;
+ for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+ if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) {
+ err = 0;
+ break;
+ }
+ mutex_unlock(&mdsc->mutex);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -44,52 +69,40 @@
struct ceph_ioctl_layout l;
struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
struct ceph_ioctl_layout nl;
- int err, i;
+ int err;
if (copy_from_user(&l, arg, sizeof(l)))
return -EFAULT;
/* validate changed params against current layout */
err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
- if (!err) {
- nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
- nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
- nl.object_size = ceph_file_layout_object_size(ci->i_layout);
- nl.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
- nl.preferred_osd =
- (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
- } else
+ if (err)
return err;
+ memset(&nl, 0, sizeof(nl));
if (l.stripe_count)
nl.stripe_count = l.stripe_count;
+ else
+ nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
if (l.stripe_unit)
nl.stripe_unit = l.stripe_unit;
+ else
+ nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
if (l.object_size)
nl.object_size = l.object_size;
+ else
+ nl.object_size = ceph_file_layout_object_size(ci->i_layout);
if (l.data_pool)
nl.data_pool = l.data_pool;
- if (l.preferred_osd)
- nl.preferred_osd = l.preferred_osd;
+ else
+ nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
- if ((nl.object_size & ~PAGE_MASK) ||
- (nl.stripe_unit & ~PAGE_MASK) ||
- ((unsigned)nl.object_size % (unsigned)nl.stripe_unit))
- return -EINVAL;
+ /* this is obsolete, and always -1 */
+ nl.preferred_osd = le64_to_cpu(-1);
- /* make sure it's a valid data pool */
- if (l.data_pool > 0) {
- mutex_lock(&mdsc->mutex);
- err = -EINVAL;
- for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
- if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
- err = 0;
- break;
- }
- mutex_unlock(&mdsc->mutex);
- if (err)
- return err;
- }
+ err = __validate_layout(mdsc, &nl);
+ if (err)
+ return err;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
USE_AUTH_MDS);
@@ -106,8 +119,6 @@
req->r_args.setlayout.layout.fl_object_size =
cpu_to_le32(l.object_size);
req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
- req->r_args.setlayout.layout.fl_pg_preferred =
- cpu_to_le32(l.preferred_osd);
parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
@@ -127,33 +138,16 @@
struct inode *inode = file->f_dentry->d_inode;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
- int err, i;
+ int err;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
/* copy and validate */
if (copy_from_user(&l, arg, sizeof(l)))
return -EFAULT;
- if ((l.object_size & ~PAGE_MASK) ||
- (l.stripe_unit & ~PAGE_MASK) ||
- !l.stripe_unit ||
- (l.object_size &&
- (unsigned)l.object_size % (unsigned)l.stripe_unit))
- return -EINVAL;
-
- /* make sure it's a valid data pool */
- if (l.data_pool > 0) {
- mutex_lock(&mdsc->mutex);
- err = -EINVAL;
- for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
- if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
- err = 0;
- break;
- }
- mutex_unlock(&mdsc->mutex);
- if (err)
- return err;
- }
+ err = __validate_layout(mdsc, &l);
+ if (err)
+ return err;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
USE_AUTH_MDS);
@@ -171,8 +165,6 @@
cpu_to_le32(l.object_size);
req->r_args.setlayout.layout.fl_pg_pool =
cpu_to_le32(l.data_pool);
- req->r_args.setlayout.layout.fl_pg_preferred =
- cpu_to_le32(l.preferred_osd);
err = ceph_mdsc_do_request(mdsc, inode, req);
ceph_mdsc_put_request(req);
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index be4a604..c77028a 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -34,6 +34,8 @@
struct ceph_ioctl_layout {
__u64 stripe_unit, stripe_count, object_size;
__u64 data_pool;
+
+ /* obsolete. new values ignored, always return -1 */
__s64 preferred_osd;
};
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 89971e1..200bc87 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -334,10 +334,10 @@
dout("mdsc put_session %p %d -> %d\n", s,
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref)) {
- if (s->s_authorizer)
+ if (s->s_auth.authorizer)
s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
s->s_mdsc->fsc->client->monc.auth,
- s->s_authorizer);
+ s->s_auth.authorizer);
kfree(s);
}
}
@@ -3395,39 +3395,33 @@
/*
* authentication
*/
-static int get_authorizer(struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new)
+
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately. Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+ int *proto, int force_new)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
- int ret = 0;
+ struct ceph_auth_handshake *auth = &s->s_auth;
- if (force_new && s->s_authorizer) {
- ac->ops->destroy_authorizer(ac, s->s_authorizer);
- s->s_authorizer = NULL;
+ if (force_new && auth->authorizer) {
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, auth->authorizer);
+ auth->authorizer = NULL;
}
- if (s->s_authorizer == NULL) {
- if (ac->ops->create_authorizer) {
- ret = ac->ops->create_authorizer(
- ac, CEPH_ENTITY_TYPE_MDS,
- &s->s_authorizer,
- &s->s_authorizer_buf,
- &s->s_authorizer_buf_len,
- &s->s_authorizer_reply_buf,
- &s->s_authorizer_reply_buf_len);
- if (ret)
- return ret;
- }
+ if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+ int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
+ auth);
+ if (ret)
+ return ERR_PTR(ret);
}
-
*proto = ac->protocol;
- *buf = s->s_authorizer_buf;
- *len = s->s_authorizer_buf_len;
- *reply_buf = s->s_authorizer_reply_buf;
- *reply_len = s->s_authorizer_reply_buf_len;
- return 0;
+
+ return auth;
}
@@ -3437,7 +3431,7 @@
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
- return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+ return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 8c7c04e..dd26846 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -11,6 +11,7 @@
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
+#include <linux/ceph/auth.h>
/*
* Some lock dependencies:
@@ -113,9 +114,7 @@
struct ceph_connection s_con;
- struct ceph_authorizer *s_authorizer;
- void *s_authorizer_buf, *s_authorizer_reply_buf;
- size_t s_authorizer_buf_len, s_authorizer_reply_buf_len;
+ struct ceph_auth_handshake s_auth;
/* protected by s_gen_ttl_lock */
spinlock_t s_gen_ttl_lock;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 35b8633..785cb30 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -118,15 +118,6 @@
(unsigned long long)ceph_file_layout_su(ci->i_layout),
(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-
- if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
- val += ret;
- size -= ret;
- ret += snprintf(val, size, "preferred_osd=%lld\n",
- (unsigned long long)ceph_file_layout_pg_preferred(
- ci->i_layout));
- }
-
return ret;
}
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 5e6dbe89..e50170c 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -50,7 +50,7 @@
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
- ioc_ioprio_changed(ioc, ioprio);
+ ioc->ioprio = ioprio;
put_io_context(ioc);
}
diff --git a/fs/splice.c b/fs/splice.c
index f847684..406ef2b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1388,7 +1388,7 @@
*/
static int get_iovec_page_array(const struct iovec __user *iov,
unsigned int nr_vecs, struct page **pages,
- struct partial_page *partial, int aligned,
+ struct partial_page *partial, bool aligned,
unsigned int pipe_buffers)
{
int buffers = 0, error = 0;
@@ -1626,7 +1626,7 @@
return -ENOMEM;
spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
- spd.partial, flags & SPLICE_F_GIFT,
+ spd.partial, false,
pipe->buffers);
if (spd.nr_pages <= 0)
ret = spd.nr_pages;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4d94eb8..2643589 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -269,6 +269,14 @@
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif /* CONFIG_BLK_CGROUP */
+
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4053cbd..0edb65d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,6 +14,8 @@
struct bio_integrity_payload;
struct page;
struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
@@ -66,6 +68,14 @@
bio_end_io_t *bi_end_io;
void *bi_private;
+#ifdef CONFIG_BLK_CGROUP
+ /*
+ * Optional ioc and css associated with this bio. Put on bio
+ * release. Read comment on top of bio_associate_current().
+ */
+ struct io_context *bi_ioc;
+ struct cgroup_subsys_state *bi_css;
+#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d4ac24..ba43f40 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -32,10 +32,17 @@
struct request;
struct sg_io_hdr;
struct bsg_job;
+struct blkcg_gq;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS 2
+
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
@@ -363,6 +370,11 @@
struct list_head timeout_list;
struct list_head icq_list;
+#ifdef CONFIG_BLK_CGROUP
+ DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
+ struct blkcg_gq *root_blkg;
+ struct list_head blkg_list;
+#endif
struct queue_limits limits;
@@ -390,12 +402,17 @@
struct mutex sysfs_lock;
+ int bypass_depth;
+
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ struct list_head all_q_node;
+#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
@@ -407,7 +424,7 @@
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
@@ -491,6 +508,7 @@
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index aa13392..d4080f3 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -14,6 +14,14 @@
struct ceph_auth_client;
struct ceph_authorizer;
+struct ceph_auth_handshake {
+ struct ceph_authorizer *authorizer;
+ void *authorizer_buf;
+ size_t authorizer_buf_len;
+ void *authorizer_reply_buf;
+ size_t authorizer_reply_buf_len;
+};
+
struct ceph_auth_client_ops {
const char *name;
@@ -43,9 +51,7 @@
* the response to authenticate the service.
*/
int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len);
+ struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
void (*destroy_authorizer)(struct ceph_auth_client *ac,
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index b8c6069..e81ab30 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -65,7 +65,7 @@
__le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
/* object -> pg layout */
- __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
+ __le32 fl_unused; /* unused; used to be preferred primary (-1) */
__le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));
@@ -384,7 +384,7 @@
__le32 stripe_count; /* ... */
__le32 object_size;
__le32 file_replication;
- __le32 preferred;
+ __le32 unused; /* used to be preferred osd */
} __attribute__ ((packed)) open;
struct {
__le32 flags;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 220ae21..d8615de 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -46,9 +46,14 @@
/*
* bounds check input.
*/
+static inline int ceph_has_room(void **p, void *end, size_t n)
+{
+ return end >= *p && n <= end - *p;
+}
+
#define ceph_decode_need(p, end, n, bad) \
do { \
- if (unlikely(*(p) + (n) > (end))) \
+ if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
} while (0)
@@ -167,7 +172,7 @@
#define ceph_encode_need(p, end, n, bad) \
do { \
- if (unlikely(*(p) + (n) > (end))) \
+ if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
} while (0)
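
Purely illustrative (foo_skip_blob() is hypothetical, the macro usage mirrors the existing ceph decoders): the size-based ceph_has_room() check cannot overflow even when the length comes off the wire.

    #include <linux/errno.h>
    #include <linux/ceph/decode.h>

    static int foo_skip_blob(void **p, void *end)
    {
        u32 len;

        ceph_decode_need(p, end, sizeof(u32), bad);
        len = ceph_decode_32(p);
        /* len is untrusted; compare sizes instead of computing *p + len */
        ceph_decode_need(p, end, len, bad);
        *p += len;
        return 0;
    bad:
        return -ERANGE;
    }
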
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 3bff047..2521a95 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -25,9 +25,9 @@
void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
/* authorize an outgoing connection */
- int (*get_authorizer) (struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new);
+ struct ceph_auth_handshake *(*get_authorizer) (
+ struct ceph_connection *con,
+ int *proto, int force_new);
int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
int (*invalidate_authorizer)(struct ceph_connection *con);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7c05ac2..cedfb1a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -6,9 +6,10 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
-#include "types.h"
-#include "osdmap.h"
-#include "messenger.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/auth.h>
/*
* Maximum object name size
@@ -40,9 +41,7 @@
struct list_head o_requests;
struct list_head o_linger_requests;
struct list_head o_osd_lru;
- struct ceph_authorizer *o_authorizer;
- void *o_authorizer_buf, *o_authorizer_reply_buf;
- size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
+ struct ceph_auth_handshake o_auth;
unsigned long lru_ttl;
int o_marked_for_keepalive;
struct list_head o_keepalive_item;
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index ba4c205..311ef8d 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -65,8 +65,6 @@
#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
#define ceph_file_layout_object_su(l) \
((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
- ((__s32)le32_to_cpu((l).fl_pg_preferred))
#define ceph_file_layout_pg_pool(l) \
((__s32)le32_to_cpu((l).fl_pg_pool))
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 97e435b..7c47508 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -151,16 +151,6 @@
struct crush_bucket **buckets;
struct crush_rule **rules;
- /*
- * Parent pointers to identify the parent bucket a device or
- * bucket in the hierarchy. If an item appears more than
- * once, this is the _last_ time it appeared (where buckets
- * are processed in bucket id order, from -1 on down to
- * -max_buckets.
- */
- __u32 *bucket_parents;
- __u32 *device_parents;
-
__s32 max_buckets;
__u32 max_rules;
__s32 max_devices;
@@ -168,8 +158,7 @@
/* crush.c */
-extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
-extern void crush_calc_parents(struct crush_map *map);
+extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
@@ -177,4 +166,9 @@
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy(struct crush_map *map);
+static inline int crush_calc_tree_node(int i)
+{
+ return ((i+1) << 1)-1;
+}
+
#endif
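
As an aside, the new helper just encodes where a tree bucket keeps its leaves: position i maps to node 2*i + 1, so leaf weights live at the odd indices of node_weights.

    /*
     * crush_calc_tree_node(0) == 1
     * crush_calc_tree_node(1) == 3
     * crush_calc_tree_node(2) == 5
     *
     * i.e. ((i + 1) << 1) - 1 == 2 * i + 1, which is the index
     * crush_get_bucket_item_weight() now uses for tree buckets.
     */
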
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index c46b99c..71d79f4 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -10,11 +10,10 @@
#include "crush.h"
-extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
-extern int crush_do_rule(struct crush_map *map,
+extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
+extern int crush_do_rule(const struct crush_map *map,
int ruleno,
int x, int *result, int result_max,
- int forcefeed, /* -1 for none */
- __u32 *weights);
+ const __u32 *weights);
#endif
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9e5f560..47e3d48 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.11"
+#define REL_VERSION "8.3.13"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 96
@@ -112,8 +112,8 @@
ERR_OPEN_MD_DISK = 105,
ERR_DISK_NOT_BDEV = 107,
ERR_MD_NOT_BDEV = 108,
- ERR_DISK_TO_SMALL = 111,
- ERR_MD_DISK_TO_SMALL = 112,
+ ERR_DISK_TOO_SMALL = 111,
+ ERR_MD_DISK_TOO_SMALL = 112,
ERR_BDCLAIM_DISK = 114,
ERR_BDCLAIM_MD_DISK = 115,
ERR_MD_IDX_INVALID = 116,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 447c367..fb670bf 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -48,6 +48,11 @@
#define DRBD_TIMEOUT_MAX 600
#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+ /* If backing disk takes longer than disk_timeout, mark the disk as failed */
+#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+
/* active connection retries when C_WF_CONNECTION */
#define DRBD_CONNECT_INT_MIN 1
#define DRBD_CONNECT_INT_MAX 120
@@ -60,7 +65,7 @@
/* timeout for the ping packets.*/
#define DRBD_PING_TIMEO_MIN 1
-#define DRBD_PING_TIMEO_MAX 100
+#define DRBD_PING_TIMEO_MAX 300
#define DRBD_PING_TIMEO_DEF 5
/* max number of write requests between write barriers */
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index ab6159e4..a8706f0 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -31,9 +31,12 @@
NL_INTEGER( 56, T_MAY_IGNORE, max_bio_bvecs)
NL_BIT( 57, T_MAY_IGNORE, no_disk_barrier)
NL_BIT( 58, T_MAY_IGNORE, no_disk_drain)
+ NL_INTEGER( 89, T_MAY_IGNORE, disk_timeout)
)
-NL_PACKET(detach, 4, )
+NL_PACKET(detach, 4,
+ NL_BIT( 88, T_MANDATORY, detach_force)
+)
NL_PACKET(net_conf, 5,
NL_STRING( 8, T_MANDATORY, my_addr, 128)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7d4e035..c03af76 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,12 +28,13 @@
typedef void (elevator_init_icq_fn) (struct io_cq *);
typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
+ struct bio *, gfp_t);
typedef void (elevator_put_req_fn) (struct request *);
typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-typedef void *(elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *);
typedef void (elevator_exit_fn) (struct elevator_queue *);
struct elevator_ops
@@ -129,7 +130,8 @@
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask);
extern void elv_put_request(struct request_queue *, struct request *);
extern void elv_drain_elevator(struct request_queue *);
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 73c28de..7a11401 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -110,6 +110,9 @@
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
+#define MODULE_ALIAS_GENL_FAMILY(family)\
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
#endif /* __KERNEL__ */
#endif /* __LINUX_GENERIC_NETLINK_H */
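
A sketch of how a generic netlink family would pair its registration with the new alias so a userspace request for the family name can auto-load the module; the "foo" family below is hypothetical.

    #include <linux/module.h>
    #include <net/genetlink.h>

    static struct genl_family foo_genl_family = {
        .id      = GENL_ID_GENERATE,
        .name    = "foo",
        .version = 1,
        .maxattr = 0,
    };

    static int __init foo_init(void)
    {
        return genl_register_family(&foo_genl_family);
    }
    module_init(foo_init);

    static void __exit foo_exit(void)
    {
        genl_unregister_family(&foo_genl_family);
    }
    module_exit(foo_exit);

    MODULE_LICENSE("GPL");
    MODULE_ALIAS_GENL_FAMILY("foo");
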
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b66cb60..ddfa041 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -541,7 +541,7 @@
__u16 flags;
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
-#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
@@ -554,8 +554,9 @@
#define I2C_FUNC_I2C 0x00000001
#define I2C_FUNC_10BIT_ADDR 0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */
+#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */
#define I2C_FUNC_SMBUS_PEC 0x00000008
+#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
#define I2C_FUNC_SMBUS_QUICK 0x00010000
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
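
Drivers relying on I2C_M_NOSTART should now probe for the dedicated capability bit rather than the broad mangling flag. A minimal sketch; foo_write_reg() and its two-part message layout are assumptions used only to show the check.

    #include <linux/i2c.h>

    static int foo_write_reg(struct i2c_client *client, u8 reg, u8 val)
    {
        struct i2c_msg msgs[2] = {
            { .addr = client->addr, .flags = 0,
              .len = 1, .buf = &reg },
            /* continue the same write without a repeated start */
            { .addr = client->addr, .flags = I2C_M_NOSTART,
              .len = 1, .buf = &val },
        };

        if (!i2c_check_functionality(client->adapter, I2C_FUNC_NOSTART))
            return -EOPNOTSUPP;

        return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
    }
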
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1a30180..df38db2 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,11 +6,7 @@
#include <linux/workqueue.h>
enum {
- ICQ_IOPRIO_CHANGED = 1 << 0,
- ICQ_CGROUP_CHANGED = 1 << 1,
ICQ_EXITED = 1 << 2,
-
- ICQ_CHANGED_MASK = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
};
/*
@@ -100,6 +96,7 @@
*/
struct io_context {
atomic_long_t refcount;
+ atomic_t active_ref;
atomic_t nr_tasks;
/* all the fields below are protected by this lock */
@@ -120,29 +117,37 @@
struct work_struct release_work;
};
-static inline struct io_context *ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs. This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
{
- /*
- * if ref count is zero, don't allow sharing (ioc is going away, it's
- * a race).
- */
- if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
- atomic_inc(&ioc->nr_tasks);
- return ioc;
- }
+ WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+ WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+ atomic_long_inc(&ioc->refcount);
+ atomic_inc(&ioc->active_ref);
+}
- return NULL;
+static inline void ioc_task_link(struct io_context *ioc)
+{
+ get_io_context_active(ioc);
+
+ WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+ atomic_inc(&ioc->nr_tasks);
}
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
gfp_t gfp_flags, int node);
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
-void ioc_cgroup_changed(struct io_context *ioc);
-unsigned int icq_get_changed(struct io_cq *icq);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d937580..450293f 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -35,12 +35,13 @@
#define IOMMU_FAULT_WRITE 0x1
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
- struct device *, unsigned long, int);
+ struct device *, unsigned long, int, void *);
struct iommu_domain {
struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
+ void *handler_token;
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
@@ -95,7 +96,7 @@
extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler);
+ iommu_fault_handler_t handler, void *token);
extern int iommu_device_group(struct device *dev, unsigned int *groupid);
/**
@@ -132,7 +133,8 @@
* invoke it.
*/
if (domain->handler)
- ret = domain->handler(domain, dev, iova, flags);
+ ret = domain->handler(domain, dev, iova, flags,
+ domain->handler_token);
return ret;
}
@@ -191,7 +193,7 @@
}
static inline void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler, void *token)
{
}
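
A hedged sketch of the new token argument in use: it lets a driver hand its own context to the fault handler instead of reaching for globals. struct foo_dev, foo_fault() and foo_attach() are hypothetical names.

    #include <linux/device.h>
    #include <linux/iommu.h>

    struct foo_dev {
        struct device *dev;
        struct iommu_domain *domain;
    };

    static int foo_fault(struct iommu_domain *domain, struct device *dev,
                         unsigned long iova, int flags, void *token)
    {
        struct foo_dev *foo = token;    /* driver-private context */

        dev_err(foo->dev, "iommu fault at 0x%lx (flags 0x%x)\n", iova, flags);
        return 0;                       /* handled; core will not WARN */
    }

    static void foo_attach(struct foo_dev *foo)
    {
        iommu_set_fault_handler(foo->domain, foo_fault, foo);
    }
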
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 76dad48..beb9ce1 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -42,26 +42,14 @@
};
/*
+ * Fallback BE priority
+ */
+#define IOPRIO_NORM (4)
+
+/*
* if process has set io priority explicitly, use that. if not, convert
* the cpu scheduler nice value to an io priority
*/
-#define IOPRIO_NORM (4)
-static inline int task_ioprio(struct io_context *ioc)
-{
- if (ioprio_valid(ioc->ioprio))
- return IOPRIO_PRIO_DATA(ioc->ioprio);
-
- return IOPRIO_NORM;
-}
-
-static inline int task_ioprio_class(struct io_context *ioc)
-{
- if (ioprio_valid(ioc->ioprio))
- return IOPRIO_PRIO_CLASS(ioc->ioprio);
-
- return IOPRIO_CLASS_BE;
-}
-
static inline int task_nice_ioprio(struct task_struct *task)
{
return (task_nice(task) + 20) / 5;
diff --git a/include/linux/net.h b/include/linux/net.h
index 2d7510f..e9ac2df 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,5 +313,8 @@
MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
"-type-" __stringify(type))
+#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
+ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+ name)
#endif /* __KERNEL__ */
#endif /* _LINUX_NET_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e7fd468..d94cb14 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2795,15 +2795,15 @@
#define netif_info(priv, type, dev, fmt, args...) \
netif_level(info, priv, type, dev, fmt, ##args)
-#if defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...) \
({ \
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0e50171..b534a1b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1896,8 +1896,6 @@
{
int delta = 0;
- if (headroom < NET_SKB_PAD)
- headroom = NET_SKB_PAD;
if (headroom > skb_headroom(skb))
delta = headroom - skb_headroom(skb);
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index ac40716..da70f0f 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -45,6 +45,8 @@
#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */
#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */
#define WDIOF_PRETIMEOUT 0x0200 /* Pretimeout (in seconds), get/set */
+#define WDIOF_ALARMONLY 0x0400 /* Watchdog triggers a management or
+ other external alarm not a reboot */
#define WDIOF_KEEPALIVEPING 0x8000 /* Keep alive ping reply */
#define WDIOS_DISABLECARD 0x0001 /* Turn off the watchdog timer */
@@ -54,6 +56,8 @@
#ifdef __KERNEL__
#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
struct watchdog_ops;
struct watchdog_device;
@@ -67,6 +71,8 @@
* @status: The routine that shows the status of the watchdog device.
* @set_timeout:The routine for setting the watchdog devices timeout value.
* @get_timeleft:The routine that get's the time that's left before a reset.
+ * @ref: The ref operation for dyn. allocated watchdog_device structs
+ * @unref: The unref operation for dyn. allocated watchdog_device structs
* @ioctl: The routines that handles extra ioctl calls.
*
* The watchdog_ops structure contains a list of low-level operations
@@ -84,11 +90,17 @@
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
+ void (*ref)(struct watchdog_device *);
+ void (*unref)(struct watchdog_device *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
};
/** struct watchdog_device - The structure that defines a watchdog device
*
+ * @id: The watchdog's ID. (Allocated by watchdog_register_device)
+ * @cdev: The watchdog's Character device.
+ * @dev: The device for our watchdog
+ * @parent: The parent bus device
* @info: Pointer to a watchdog_info structure.
* @ops: Pointer to the list of watchdog operations.
* @bootstatus: Status of the watchdog device at boot.
@@ -96,6 +108,7 @@
* @min_timeout:The watchdog devices minimum timeout value.
* @max_timeout:The watchdog devices maximum timeout value.
* @driver-data:Pointer to the drivers private data.
+ * @lock: Lock for watchdog core internal use only.
* @status: Field that contains the devices internal status bits.
*
* The watchdog_device structure contains all information about a
@@ -103,8 +116,15 @@
*
* The driver-data field may not be accessed directly. It must be accessed
* via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
+ *
+ * The lock field is for watchdog core internal use only and should not be
+ * touched.
*/
struct watchdog_device {
+ int id;
+ struct cdev cdev;
+ struct device *dev;
+ struct device *parent;
const struct watchdog_info *info;
const struct watchdog_ops *ops;
unsigned int bootstatus;
@@ -112,12 +132,14 @@
unsigned int min_timeout;
unsigned int max_timeout;
void *driver_data;
+ struct mutex lock;
unsigned long status;
/* Bit numbers for status flags */
#define WDOG_ACTIVE 0 /* Is the watchdog running/active */
#define WDOG_DEV_OPEN 1 /* Opened via /dev/watchdog ? */
#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */
#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
+#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
};
#ifdef CONFIG_WATCHDOG_NOWAYOUT
@@ -128,6 +150,12 @@
#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
#endif
+/* Use the following function to check whether or not the watchdog is active */
+static inline bool watchdog_active(struct watchdog_device *wdd)
+{
+ return test_bit(WDOG_ACTIVE, &wdd->status);
+}
+
/* Use the following function to set the nowayout feature */
static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
{
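
One way a driver with a dynamically allocated watchdog_device might back the new ref/unref operations, sketched here with a kref so the core can keep the structure alive across release; struct foo_wdt and its names are hypothetical, not part of this patch.

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/watchdog.h>

    struct foo_wdt {
        struct watchdog_device wdd;
        struct kref kref;
    };

    static void foo_wdt_free(struct kref *kref)
    {
        kfree(container_of(kref, struct foo_wdt, kref));
    }

    static void foo_wdt_ref(struct watchdog_device *wdd)
    {
        kref_get(&container_of(wdd, struct foo_wdt, wdd)->kref);
    }

    static void foo_wdt_unref(struct watchdog_device *wdd)
    {
        kref_put(&container_of(wdd, struct foo_wdt, wdd)->kref, foo_wdt_free);
    }
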
diff --git a/include/net/dst.h b/include/net/dst.h
index bed833d..8197ead 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -60,6 +60,7 @@
#define DST_NOCOUNT 0x0020
#define DST_NOPEER 0x0040
#define DST_FAKE_RTABLE 0x0080
+#define DST_XFRM_TUNNEL 0x0100
short error;
short obsolete;
diff --git a/init/Kconfig b/init/Kconfig
index 81816b8..1e004d0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -803,7 +803,7 @@
endif #CGROUP_SCHED
config BLK_CGROUP
- tristate "Block IO controller"
+ bool "Block IO controller"
depends on BLOCK
default n
---help---
diff --git a/kernel/fork.c b/kernel/fork.c
index 017fb23d..31a32c7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -976,9 +976,8 @@
* Share io context with parent, if CLONE_IO is set
*/
if (clone_flags & CLONE_IO) {
- tsk->io_context = ioc_task_link(ioc);
- if (unlikely(!tsk->io_context))
- return -ENOMEM;
+ ioc_task_link(ioc);
+ tsk->io_context = ioc;
} else if (ioprio_valid(ioc->ioprio)) {
new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
if (unlikely(!new_ioc))
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6420cda..1d0f6a8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1486,6 +1486,11 @@
if (!buffer)
return size;
+ /* Make sure the requested buffer exists */
+ if (cpu_id != RING_BUFFER_ALL_CPUS &&
+ !cpumask_test_cpu(cpu_id, buffer->cpumask))
+ return size;
+
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
size *= BUF_PAGE_SIZE;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 13ef233..518aea7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -430,7 +430,7 @@
*/
static struct dma_debug_entry *dma_entry_alloc(void)
{
- struct dma_debug_entry *entry = NULL;
+ struct dma_debug_entry *entry;
unsigned long flags;
spin_lock_irqsave(&free_entries_lock, flags);
@@ -438,11 +438,14 @@
if (list_empty(&free_entries)) {
pr_err("DMA-API: debugging out of memory - disabling\n");
global_disable = true;
- goto out;
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+ return NULL;
}
entry = __dma_entry_alloc();
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
#ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
entry->stacktrace.entries = entry->st_entries;
@@ -450,9 +453,6 @@
save_stack_trace(&entry->stacktrace);
#endif
-out:
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
return entry;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 285a81e..e198831 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3036,7 +3036,8 @@
region_add(&inode->i_mapping->private_list, from, to);
return 0;
out_err:
- resv_map_put(vma);
+ if (vma)
+ resv_map_put(vma);
return ret;
}
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 214c2bb..925ca58 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -59,9 +59,7 @@
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len)
+ struct ceph_auth_handshake *auth)
{
struct ceph_auth_none_info *ai = ac->private;
struct ceph_none_authorizer *au = &ai->au;
@@ -82,11 +80,12 @@
dout("built authorizer len %d\n", au->buf_len);
}
- *a = (struct ceph_authorizer *)au;
- *buf = au->buf;
- *len = au->buf_len;
- *reply_buf = au->reply_buf;
- *reply_len = sizeof(au->reply_buf);
+ auth->authorizer = (struct ceph_authorizer *) au;
+ auth->authorizer_buf = au->buf;
+ auth->authorizer_buf_len = au->buf_len;
+ auth->authorizer_reply_buf = au->reply_buf;
+ auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
return 0;
bad2:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 1587dc6..a16bf14 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -526,9 +526,7 @@
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len)
+ struct ceph_auth_handshake *auth)
{
struct ceph_x_authorizer *au;
struct ceph_x_ticket_handler *th;
@@ -548,11 +546,12 @@
return ret;
}
- *a = (struct ceph_authorizer *)au;
- *buf = au->buf->vec.iov_base;
- *len = au->buf->vec.iov_len;
- *reply_buf = au->reply_buf;
- *reply_len = sizeof(au->reply_buf);
+ auth->authorizer = (struct ceph_authorizer *) au;
+ auth->authorizer_buf = au->buf->vec.iov_base;
+ auth->authorizer_buf_len = au->buf->vec.iov_len;
+ auth->authorizer_reply_buf = au->reply_buf;
+ auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
return 0;
}
diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c
index d6ebb13..0896132 100644
--- a/net/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -26,9 +26,9 @@
* @b: bucket pointer
* @p: item index in bucket
*/
-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
{
- if (p >= b->size)
+ if ((__u32)p >= b->size)
return 0;
switch (b->alg) {
@@ -37,38 +37,13 @@
case CRUSH_BUCKET_LIST:
return ((struct crush_bucket_list *)b)->item_weights[p];
case CRUSH_BUCKET_TREE:
- if (p & 1)
- return ((struct crush_bucket_tree *)b)->node_weights[p];
- return 0;
+ return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
case CRUSH_BUCKET_STRAW:
return ((struct crush_bucket_straw *)b)->item_weights[p];
}
return 0;
}
-/**
- * crush_calc_parents - Calculate parent vectors for the given crush map.
- * @map: crush_map pointer
- */
-void crush_calc_parents(struct crush_map *map)
-{
- int i, b, c;
-
- for (b = 0; b < map->max_buckets; b++) {
- if (map->buckets[b] == NULL)
- continue;
- for (i = 0; i < map->buckets[b]->size; i++) {
- c = map->buckets[b]->items[i];
- BUG_ON(c >= map->max_devices ||
- c < -map->max_buckets);
- if (c >= 0)
- map->device_parents[c] = map->buckets[b]->id;
- else
- map->bucket_parents[-1-c] = map->buckets[b]->id;
- }
- }
-}
-
void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
{
kfree(b->h.perm);
@@ -87,6 +62,8 @@
void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
{
+ kfree(b->h.perm);
+ kfree(b->h.items);
kfree(b->node_weights);
kfree(b);
}
@@ -124,10 +101,9 @@
*/
void crush_destroy(struct crush_map *map)
{
- int b;
-
/* buckets */
if (map->buckets) {
+ __s32 b;
for (b = 0; b < map->max_buckets; b++) {
if (map->buckets[b] == NULL)
continue;
@@ -138,13 +114,12 @@
/* rules */
if (map->rules) {
+ __u32 b;
for (b = 0; b < map->max_rules; b++)
kfree(map->rules[b]);
kfree(map->rules);
}
- kfree(map->bucket_parents);
- kfree(map->device_parents);
kfree(map);
}
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 363f8f7..d7edc24 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -33,9 +33,9 @@
* @type: storage ruleset type (user defined)
* @size: output set size
*/
-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
{
- int i;
+ __u32 i;
for (i = 0; i < map->max_rules; i++) {
if (map->rules[i] &&
@@ -73,7 +73,7 @@
unsigned int i, s;
/* start a new permutation if @x has changed */
- if (bucket->perm_x != x || bucket->perm_n == 0) {
+ if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
dprintk("bucket %d new x=%d\n", bucket->id, x);
bucket->perm_x = x;
@@ -153,8 +153,8 @@
return bucket->h.items[i];
}
- BUG_ON(1);
- return 0;
+ dprintk("bad list sums for bucket %d\n", bucket->h.id);
+ return bucket->h.items[0];
}
@@ -220,7 +220,7 @@
static int bucket_straw_choose(struct crush_bucket_straw *bucket,
int x, int r)
{
- int i;
+ __u32 i;
int high = 0;
__u64 high_draw = 0;
__u64 draw;
@@ -240,6 +240,7 @@
static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
{
dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
+ BUG_ON(in->size == 0);
switch (in->alg) {
case CRUSH_BUCKET_UNIFORM:
return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -254,7 +255,7 @@
return bucket_straw_choose((struct crush_bucket_straw *)in,
x, r);
default:
- BUG_ON(1);
+ dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
return in->items[0];
}
}
@@ -263,7 +264,7 @@
* true if device is marked "out" (failed, fully offloaded)
* of the cluster
*/
-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
{
if (weight[item] >= 0x10000)
return 0;
@@ -288,16 +289,16 @@
* @recurse_to_leaf: true if we want one device under each item of given type
* @out2: second output vector for leaf items (if @recurse_to_leaf)
*/
-static int crush_choose(struct crush_map *map,
+static int crush_choose(const struct crush_map *map,
struct crush_bucket *bucket,
- __u32 *weight,
+ const __u32 *weight,
int x, int numrep, int type,
int *out, int outpos,
int firstn, int recurse_to_leaf,
int *out2)
{
int rep;
- int ftotal, flocal;
+ unsigned int ftotal, flocal;
int retry_descent, retry_bucket, skip_rep;
struct crush_bucket *in = bucket;
int r;
@@ -305,7 +306,7 @@
int item = 0;
int itemtype;
int collide, reject;
- const int orig_tries = 5; /* attempts before we fall back to search */
+ const unsigned int orig_tries = 5; /* attempts before we fall back to search */
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep);
@@ -326,7 +327,7 @@
r = rep;
if (in->alg == CRUSH_BUCKET_UNIFORM) {
/* be careful */
- if (firstn || numrep >= in->size)
+ if (firstn || (__u32)numrep >= in->size)
/* r' = r + f_total */
r += ftotal;
else if (in->size % numrep == 0)
@@ -355,7 +356,11 @@
item = bucket_perm_choose(in, x, r);
else
item = crush_bucket_choose(in, x, r);
- BUG_ON(item >= map->max_devices);
+ if (item >= map->max_devices) {
+ dprintk(" bad item %d\n", item);
+ skip_rep = 1;
+ break;
+ }
/* desired type? */
if (item < 0)
@@ -366,8 +371,12 @@
/* keep going? */
if (itemtype != type) {
- BUG_ON(item >= 0 ||
- (-1-item) >= map->max_buckets);
+ if (item >= 0 ||
+ (-1-item) >= map->max_buckets) {
+ dprintk(" bad item type %d\n", type);
+ skip_rep = 1;
+ break;
+ }
in = map->buckets[-1-item];
retry_bucket = 1;
continue;
@@ -416,7 +425,7 @@
if (collide && flocal < 3)
/* retry locally a few times */
retry_bucket = 1;
- else if (flocal < in->size + orig_tries)
+ else if (flocal <= in->size + orig_tries)
/* exhaustive bucket search */
retry_bucket = 1;
else if (ftotal < 20)
@@ -426,7 +435,7 @@
/* else give up */
skip_rep = 1;
dprintk(" reject %d collide %d "
- "ftotal %d flocal %d\n",
+ "ftotal %u flocal %u\n",
reject, collide, ftotal,
flocal);
}
@@ -455,15 +464,12 @@
* @x: hash input
* @result: pointer to result vector
* @result_max: maximum result size
- * @force: force initial replica choice; -1 for none
*/
-int crush_do_rule(struct crush_map *map,
+int crush_do_rule(const struct crush_map *map,
int ruleno, int x, int *result, int result_max,
- int force, __u32 *weight)
+ const __u32 *weight)
{
int result_len;
- int force_context[CRUSH_MAX_DEPTH];
- int force_pos = -1;
int a[CRUSH_MAX_SET];
int b[CRUSH_MAX_SET];
int c[CRUSH_MAX_SET];
@@ -474,66 +480,44 @@
int osize;
int *tmp;
struct crush_rule *rule;
- int step;
+ __u32 step;
int i, j;
int numrep;
int firstn;
- BUG_ON(ruleno >= map->max_rules);
+ if ((__u32)ruleno >= map->max_rules) {
+ dprintk(" bad ruleno %d\n", ruleno);
+ return 0;
+ }
rule = map->rules[ruleno];
result_len = 0;
w = a;
o = b;
- /*
- * determine hierarchical context of force, if any. note
- * that this may or may not correspond to the specific types
- * referenced by the crush rule.
- */
- if (force >= 0 &&
- force < map->max_devices &&
- map->device_parents[force] != 0 &&
- !is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
- }
-
for (step = 0; step < rule->len; step++) {
+ struct crush_rule_step *curstep = &rule->steps[step];
+
firstn = 0;
- switch (rule->steps[step].op) {
+ switch (curstep->op) {
case CRUSH_RULE_TAKE:
- w[0] = rule->steps[step].arg1;
-
- /* find position in force_context/hierarchy */
- while (force_pos >= 0 &&
- force_context[force_pos] != w[0])
- force_pos--;
- /* and move past it */
- if (force_pos >= 0)
- force_pos--;
-
+ w[0] = curstep->arg1;
wsize = 1;
break;
case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
case CRUSH_RULE_CHOOSE_FIRSTN:
firstn = 1;
+ /* fall through */
case CRUSH_RULE_CHOOSE_LEAF_INDEP:
case CRUSH_RULE_CHOOSE_INDEP:
- BUG_ON(wsize == 0);
+ if (wsize == 0)
+ break;
recurse_to_leaf =
- rule->steps[step].op ==
+ curstep->op ==
CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
- rule->steps[step].op ==
+ curstep->op ==
CRUSH_RULE_CHOOSE_LEAF_INDEP;
/* reset output */
@@ -545,32 +529,18 @@
* basically, numrep <= 0 means relative to
* the provided result_max
*/
- numrep = rule->steps[step].arg1;
+ numrep = curstep->arg1;
if (numrep <= 0) {
numrep += result_max;
if (numrep <= 0)
continue;
}
j = 0;
- if (osize == 0 && force_pos >= 0) {
- /* skip any intermediate types */
- while (force_pos &&
- force_context[force_pos] < 0 &&
- rule->steps[step].arg2 !=
- map->buckets[-1 -
- force_context[force_pos]]->type)
- force_pos--;
- o[osize] = force_context[force_pos];
- if (recurse_to_leaf)
- c[osize] = force_context[0];
- j++;
- force_pos--;
- }
osize += crush_choose(map,
map->buckets[-1-w[i]],
weight,
x, numrep,
- rule->steps[step].arg2,
+ curstep->arg2,
o+osize, j,
firstn,
recurse_to_leaf, c+osize);
@@ -597,7 +567,9 @@
break;
default:
- BUG_ON(1);
+ dprintk(" unknown op %d at step %d\n",
+ curstep->op, step);
+ break;
}
}
return result_len;
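
Note: with the force argument dropped and the map/weight parameters now const, a caller passes only the rule number, the hash input and a result buffer. A minimal sketch mirroring the net/ceph/osdmap.c hunk later in this series (the osds[] size and `want` value are purely illustrative):

	int osds[16];	/* illustrative result buffer */
	int want = 3;	/* illustrative replica count */
	int r;

	r = crush_do_rule(osdmap->crush, ruleno, pps,
			  osds, want, osdmap->osd_weight);
	if (r < 0)	/* mapping failed; no devices chosen */
		return NULL;
	*num = r;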
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 36fa6bf..524f4e4 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -653,54 +653,57 @@
* Connection negotiation.
*/
-static int prepare_connect_authorizer(struct ceph_connection *con)
+static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
+ int *auth_proto)
{
- void *auth_buf;
- int auth_len = 0;
- int auth_protocol = 0;
+ struct ceph_auth_handshake *auth;
+
+ if (!con->ops->get_authorizer) {
+ con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
+ con->out_connect.authorizer_len = 0;
+
+ return NULL;
+ }
+
+ /* Can't hold the mutex while getting authorizer */
mutex_unlock(&con->mutex);
- if (con->ops->get_authorizer)
- con->ops->get_authorizer(con, &auth_buf, &auth_len,
- &auth_protocol, &con->auth_reply_buf,
- &con->auth_reply_buf_len,
- con->auth_retry);
+
+ auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
+
mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state))
- return -EAGAIN;
+ if (IS_ERR(auth))
+ return auth;
+ if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+ return ERR_PTR(-EAGAIN);
- con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
- con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+ con->auth_reply_buf = auth->authorizer_reply_buf;
+ con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
- if (auth_len)
- ceph_con_out_kvec_add(con, auth_len, auth_buf);
- return 0;
+ return auth;
}
/*
* We connected to a peer and are saying hello.
*/
-static void prepare_write_banner(struct ceph_messenger *msgr,
- struct ceph_connection *con)
+static void prepare_write_banner(struct ceph_connection *con)
{
- ceph_con_out_kvec_reset(con);
ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
- ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
- &msgr->my_enc_addr);
+ ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+ &con->msgr->my_enc_addr);
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
}
-static int prepare_write_connect(struct ceph_messenger *msgr,
- struct ceph_connection *con,
- int include_banner)
+static int prepare_write_connect(struct ceph_connection *con)
{
unsigned int global_seq = get_global_seq(con->msgr, 0);
int proto;
+ int auth_proto;
+ struct ceph_auth_handshake *auth;
switch (con->peer_name.type) {
case CEPH_ENTITY_TYPE_MON:
@@ -719,23 +722,32 @@
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->connect_seq, global_seq, proto);
- con->out_connect.features = cpu_to_le64(msgr->supported_features);
+ con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq = cpu_to_le32(global_seq);
con->out_connect.protocol_version = cpu_to_le32(proto);
con->out_connect.flags = 0;
- if (include_banner)
- prepare_write_banner(msgr, con);
- else
- ceph_con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
+ auth_proto = CEPH_AUTH_UNKNOWN;
+ auth = get_connect_authorizer(con, &auth_proto);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
+ con->out_connect.authorizer_len = auth ?
+ cpu_to_le32(auth->authorizer_buf_len) : 0;
+
+ ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+ &con->out_connect);
+ if (auth && auth->authorizer_buf_len)
+ ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+ auth->authorizer_buf);
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
- return prepare_connect_authorizer(con);
+ return 0;
}
/*
@@ -992,11 +1004,10 @@
static int read_partial(struct ceph_connection *con,
- int *to, int size, void *object)
+ int end, int size, void *object)
{
- *to += size;
- while (con->in_base_pos < *to) {
- int left = *to - con->in_base_pos;
+ while (con->in_base_pos < end) {
+ int left = end - con->in_base_pos;
int have = size - left;
int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
if (ret <= 0)
@@ -1012,37 +1023,52 @@
*/
static int read_partial_banner(struct ceph_connection *con)
{
- int ret, to = 0;
+ int size;
+ int end;
+ int ret;
dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
/* peer's banner */
- ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+ size = strlen(CEPH_BANNER);
+ end = size;
+ ret = read_partial(con, end, size, con->in_banner);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
- &con->actual_peer_addr);
+
+ size = sizeof (con->actual_peer_addr);
+ end += size;
+ ret = read_partial(con, end, size, &con->actual_peer_addr);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
- &con->peer_addr_for_me);
+
+ size = sizeof (con->peer_addr_for_me);
+ end += size;
+ ret = read_partial(con, end, size, &con->peer_addr_for_me);
if (ret <= 0)
goto out;
+
out:
return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
- int ret, to = 0;
+ int size;
+ int end;
+ int ret;
dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
- ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+ size = sizeof (con->in_reply);
+ end = size;
+ ret = read_partial(con, end, size, &con->in_reply);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
- con->auth_reply_buf);
+
+ size = le32_to_cpu(con->in_reply.authorizer_len);
+ end += size;
+ ret = read_partial(con, end, size, con->auth_reply_buf);
if (ret <= 0)
goto out;
@@ -1377,7 +1403,8 @@
return -1;
}
con->auth_retry = 1;
- ret = prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
@@ -1397,7 +1424,10 @@
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
/* Tell ceph about it. */
@@ -1420,7 +1450,10 @@
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_connect.connect_seq));
con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1434,7 +1467,10 @@
le32_to_cpu(con->in_connect.global_seq));
get_global_seq(con->msgr,
le32_to_cpu(con->in_connect.global_seq));
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1491,10 +1527,10 @@
*/
static int read_partial_ack(struct ceph_connection *con)
{
- int to = 0;
+ int size = sizeof (con->in_temp_ack);
+ int end = size;
- return read_partial(con, &to, sizeof(con->in_temp_ack),
- &con->in_temp_ack);
+ return read_partial(con, end, size, &con->in_temp_ack);
}
@@ -1627,8 +1663,9 @@
static int read_partial_message(struct ceph_connection *con)
{
struct ceph_msg *m = con->in_msg;
+ int size;
+ int end;
int ret;
- int to, left;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
int skip;
@@ -1638,15 +1675,11 @@
dout("read_partial_message con %p msg %p\n", con, m);
/* header */
- while (con->in_base_pos < sizeof(con->in_hdr)) {
- left = sizeof(con->in_hdr) - con->in_base_pos;
- ret = ceph_tcp_recvmsg(con->sock,
- (char *)&con->in_hdr + con->in_base_pos,
- left);
- if (ret <= 0)
- return ret;
- con->in_base_pos += ret;
- }
+ size = sizeof (con->in_hdr);
+ end = size;
+ ret = read_partial(con, end, size, &con->in_hdr);
+ if (ret <= 0)
+ return ret;
crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
if (cpu_to_le32(crc) != con->in_hdr.crc) {
@@ -1759,16 +1792,12 @@
}
/* footer */
- to = sizeof(m->hdr) + sizeof(m->footer);
- while (con->in_base_pos < to) {
- left = to - con->in_base_pos;
- ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
- (con->in_base_pos - sizeof(m->hdr)),
- left);
- if (ret <= 0)
- return ret;
- con->in_base_pos += ret;
- }
+ size = sizeof (m->footer);
+ end += size;
+ ret = read_partial(con, end, size, &m->footer);
+ if (ret <= 0)
+ return ret;
+
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -1835,7 +1864,6 @@
*/
static int try_write(struct ceph_connection *con)
{
- struct ceph_messenger *msgr = con->msgr;
int ret = 1;
dout("try_write start %p state %lu nref %d\n", con, con->state,
@@ -1846,7 +1874,11 @@
/* open the socket first? */
if (con->sock == NULL) {
- prepare_write_connect(msgr, con, 1);
+ ceph_con_out_kvec_reset(con);
+ prepare_write_banner(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ goto out;
prepare_read_banner(con);
set_bit(CONNECTING, &con->state);
clear_bit(NEGOTIATING, &con->state);
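
Note: the reworked read_partial() takes an absolute end offset rather than a running cursor, so each caller accumulates end by the size of the field it reads. A sketch of the resulting pattern, paraphrasing the read_partial_connect() hunk above:

	size = sizeof(con->in_reply);
	end = size;			/* first field ends here */
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;			/* next field ends right after it */
	ret = read_partial(con, end, size, con->auth_reply_buf);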
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1b0ef3c..1ffebed 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -278,7 +278,7 @@
{
dst->op = cpu_to_le16(src->op);
- switch (dst->op) {
+ switch (src->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
dst->extent.offset =
@@ -664,11 +664,11 @@
{
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
- if (atomic_dec_and_test(&osd->o_ref)) {
+ if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
- if (osd->o_authorizer)
- ac->ops->destroy_authorizer(ac, osd->o_authorizer);
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
kfree(osd);
}
}
@@ -841,6 +841,12 @@
static void __unregister_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
+ if (RB_EMPTY_NODE(&req->r_node)) {
+ dout("__unregister_request %p tid %lld not registered\n",
+ req, req->r_tid);
+ return;
+ }
+
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
rb_erase(&req->r_node, &osdc->requests);
osdc->num_requests--;
@@ -2108,37 +2114,32 @@
/*
* authentication
*/
-static int get_authorizer(struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new)
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately. Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+ int *proto, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- int ret = 0;
+ struct ceph_auth_handshake *auth = &o->o_auth;
- if (force_new && o->o_authorizer) {
- ac->ops->destroy_authorizer(ac, o->o_authorizer);
- o->o_authorizer = NULL;
+ if (force_new && auth->authorizer) {
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, auth->authorizer);
+ auth->authorizer = NULL;
}
- if (o->o_authorizer == NULL) {
- ret = ac->ops->create_authorizer(
- ac, CEPH_ENTITY_TYPE_OSD,
- &o->o_authorizer,
- &o->o_authorizer_buf,
- &o->o_authorizer_buf_len,
- &o->o_authorizer_reply_buf,
- &o->o_authorizer_reply_buf_len);
+ if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+ int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
+ auth);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
-
*proto = ac->protocol;
- *buf = o->o_authorizer_buf;
- *len = o->o_authorizer_buf_len;
- *reply_buf = o->o_authorizer_reply_buf;
- *reply_len = o->o_authorizer_reply_buf_len;
- return 0;
+
+ return auth;
}
@@ -2148,7 +2149,11 @@
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+ /*
+ * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
+ * XXX which do we do: succeed or fail?
+ */
+ return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
@@ -2157,7 +2162,7 @@
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- if (ac->ops->invalidate_authorizer)
+ if (ac->ops && ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
return ceph_monc_validate_auth(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 56e561a..81e3b84 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -161,13 +161,6 @@
c->max_rules = ceph_decode_32(p);
c->max_devices = ceph_decode_32(p);
- c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
- if (c->device_parents == NULL)
- goto badmem;
- c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
- if (c->bucket_parents == NULL)
- goto badmem;
-
c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
if (c->buckets == NULL)
goto badmem;
@@ -890,8 +883,12 @@
pglen = ceph_decode_32(p);
if (pglen) {
- /* insert */
ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+
+ /* removing existing (if any) */
+ (void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+ /* insert */
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
if (!pg) {
err = -ENOMEM;
@@ -1000,7 +997,6 @@
{
unsigned int num, num_mask;
struct ceph_pg pgid;
- s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
int poolid = le32_to_cpu(fl->fl_pg_pool);
struct ceph_pg_pool_info *pool;
unsigned int ps;
@@ -1011,23 +1007,13 @@
if (!pool)
return -EIO;
ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
- if (preferred >= 0) {
- ps += preferred;
- num = le32_to_cpu(pool->v.lpg_num);
- num_mask = pool->lpg_num_mask;
- } else {
- num = le32_to_cpu(pool->v.pg_num);
- num_mask = pool->pg_num_mask;
- }
+ num = le32_to_cpu(pool->v.pg_num);
+ num_mask = pool->pg_num_mask;
pgid.ps = cpu_to_le16(ps);
- pgid.preferred = cpu_to_le16(preferred);
+ pgid.preferred = cpu_to_le16(-1);
pgid.pool = fl->fl_pg_pool;
- if (preferred >= 0)
- dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
- (int)preferred);
- else
- dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+ dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
ol->ol_pgid = pgid;
ol->ol_stripe_unit = fl->fl_object_stripe_unit;
@@ -1045,24 +1031,18 @@
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
int ruleno;
- unsigned int poolid, ps, pps, t;
- int preferred;
+ unsigned int poolid, ps, pps, t, r;
poolid = le32_to_cpu(pgid.pool);
ps = le16_to_cpu(pgid.ps);
- preferred = (s16)le16_to_cpu(pgid.preferred);
pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
if (!pool)
return NULL;
/* pg_temp? */
- if (preferred >= 0)
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
- pool->lpgp_num_mask);
- else
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
- pool->pgp_num_mask);
+ t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+ pool->pgp_num_mask);
pgid.ps = cpu_to_le16(t);
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
@@ -1080,23 +1060,20 @@
return NULL;
}
- /* don't forcefeed bad device ids to crush */
- if (preferred >= osdmap->max_osd ||
- preferred >= osdmap->crush->max_devices)
- preferred = -1;
-
- if (preferred >= 0)
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.lpgp_num),
- pool->lpgp_num_mask);
- else
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.pgp_num),
- pool->pgp_num_mask);
+ pps = ceph_stable_mod(ps,
+ le32_to_cpu(pool->v.pgp_num),
+ pool->pgp_num_mask);
pps += poolid;
- *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
- min_t(int, pool->v.size, *num),
- preferred, osdmap->osd_weight);
+ r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+ min_t(int, pool->v.size, *num),
+ osdmap->osd_weight);
+ if (r < 0) {
+ pr_err("error %d from crush rule: pool %d ruleset %d type %d"
+ " size %d\n", r, poolid, pool->v.crush_ruleset,
+ pool->v.type, pool->v.size);
+ return NULL;
+ }
+ *num = r;
return osds;
}
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 3252e7e..ea5fb9f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -468,3 +468,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
+MODULE_ALIAS_GENL_FAMILY("NET_DM");
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 89a47b3..cb982a6 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -459,28 +459,22 @@
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
- u32 rem;
-
- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
- rem = mtu & (align - 1);
- mtu &= ~(align - 1);
+ unsigned int net_adj;
switch (x->props.mode) {
+ case XFRM_MODE_TRANSPORT:
+ case XFRM_MODE_BEET:
+ net_adj = sizeof(struct iphdr);
+ break;
case XFRM_MODE_TUNNEL:
+ net_adj = 0;
break;
default:
- case XFRM_MODE_TRANSPORT:
- /* The worst case */
- mtu -= blksize - 4;
- mtu += min_t(u32, blksize - 4, rem);
- break;
- case XFRM_MODE_BEET:
- /* The worst case. */
- mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
- break;
+ BUG();
}
- return mtu - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+ net_adj) & ~(align - 1)) + (net_adj - 2);
}
static void esp4_err(struct sk_buff *skb, u32 info)
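
Note: a hedged worked example of the new expression (all sizes illustrative, not taken from the patch): with mtu = 1500, x->props.header_len = 24, a 12-byte authenticator, net_adj = sizeof(struct iphdr) = 20 and align = 16, the result is ((1500 - 24 - 12 - 20) & ~15) + (20 - 2) = 1440 + 18 = 1458 bytes of payload per packet. The esp6 change below applies the same formula with net_adj = sizeof(struct ipv6hdr).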
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1e62b755..db1521f 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -413,19 +413,15 @@
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
- u32 rem;
+ unsigned int net_adj;
- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
- rem = mtu & (align - 1);
- mtu &= ~(align - 1);
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ net_adj = sizeof(struct ipv6hdr);
+ else
+ net_adj = 0;
- if (x->props.mode != XFRM_MODE_TUNNEL) {
- u32 padsize = ((blksize - 1) & 7) + 1;
- mtu -= blksize - padsize;
- mtu += min_t(u32, blksize - padsize, rem);
- }
-
- return mtu - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+ net_adj) & ~(align - 1)) + (net_adj - 2);
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d99fdc6..17b8c67 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1187,6 +1187,29 @@
return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
+static void ip6_append_data_mtu(int *mtu,
+ int *maxfraglen,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt)
+{
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+ *mtu = *mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+ *mtu = dst_mtu(rt->dst.path);
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+ }
+}
+
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int offset, int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
@@ -1196,7 +1219,7 @@
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
- struct sk_buff *skb;
+ struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen;
int exthdrlen;
int dst_exthdrlen;
@@ -1253,8 +1276,12 @@
inet->cork.fl.u.ip6 = *fl6;
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
- mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ if (rt->dst.flags & DST_XFRM_TUNNEL)
+ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ else
+ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
@@ -1350,25 +1377,27 @@
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
- struct sk_buff *skb_prev;
alloc_new_skb:
- skb_prev = skb;
-
/* There's no room in the current skb */
- if (skb_prev)
- fraggap = skb_prev->len - maxfraglen;
+ if (skb)
+ fraggap = skb->len - maxfraglen;
else
fraggap = 0;
+ /* update mtu and maxfraglen if necessary */
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt);
+
+ skb_prev = skb;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
- if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
- datalen = maxfraglen - fragheaderlen;
- fraglen = datalen + fragheaderlen;
+ if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+ datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
@@ -1377,13 +1406,16 @@
alloclen += dst_exthdrlen;
- /*
- * The last fragment gets additional space at tail.
- * Note: we overallocate on fragments with MSG_MODE
- * because we have no idea if we're the last one.
- */
- if (datalen == length + fraggap)
- alloclen += rt->dst.trailer_len;
+ if (datalen != length + fraggap) {
+ /*
+ * this is not the last fragment, the trailer
+ * space is regarded as data space.
+ */
+ datalen += rt->dst.trailer_len;
+ }
+
+ alloclen += rt->dst.trailer_len;
+ fraglen = datalen + fragheaderlen;
/*
* We just reserve space for fragment header.
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 889f5d1..70614e7 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -239,9 +239,16 @@
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
- int ret = -EINVAL;
+ int ret;
int chk_addr_ret;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_l2tpip))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET)
+ return -EINVAL;
+
ret = -EADDRINUSE;
read_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -272,6 +279,8 @@
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
ret = 0;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
out:
release_sock(sk);
@@ -288,6 +297,9 @@
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
int rc;
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ return -EINVAL;
+
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -311,6 +323,14 @@
return rc;
}
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return udp_disconnect(sk, flags);
+}
+
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -530,7 +550,7 @@
.close = l2tp_ip_close,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
- .disconnect = udp_disconnect,
+ .disconnect = l2tp_ip_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
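
Note: taken together, the SOCK_ZAPPED checks give the L2TP/IP socket an explicit life cycle; a sketch of the ordering they enforce (paraphrasing the hunks above, not literal kernel code):

	l2tp_ip_bind()        /* refused with -EINVAL unless SOCK_ZAPPED is set;
				 clears the flag on success */
	l2tp_ip_connect()     /* refused with -EINVAL while SOCK_ZAPPED is still
				 set, i.e. before a successful bind */
	l2tp_ip_disconnect()  /* returns 0 early while SOCK_ZAPPED, otherwise
				 falls through to udp_disconnect() */

The l2tp_ip6 changes below mirror the same pattern for AF_INET6.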
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0291d8d..35e1e4b 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -258,6 +258,10 @@
int addr_type;
int err;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET6)
+ return -EINVAL;
if (addr_len < sizeof(*addr))
return -EINVAL;
@@ -331,6 +335,7 @@
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
+ sock_reset_flag(sk, SOCK_ZAPPED);
release_sock(sk);
return 0;
@@ -354,6 +359,9 @@
int addr_type;
int rc;
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ return -EINVAL;
+
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -383,6 +391,14 @@
return rc;
}
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return udp_disconnect(sk, flags);
+}
+
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -689,7 +705,7 @@
.close = l2tp_ip6_close,
.bind = l2tp_ip6_bind,
.connect = l2tp_ip6_connect,
- .disconnect = udp_disconnect,
+ .disconnect = l2tp_ip6_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip6_destroy_sock,
.setsockopt = ipv6_setsockopt,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 8577264..ddc553e 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -923,5 +923,4 @@
MODULE_DESCRIPTION("L2TP netlink");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
-MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
- __stringify(NETLINK_GENERIC) "-type-" "l2tp");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b3b3c26..04c3063 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1522,6 +1522,8 @@
* anymore. The timeout will be reset if the frame is ACKed by
* the AP.
*/
+ ifmgd->probe_send_count++;
+
if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, 0);
@@ -1538,7 +1540,6 @@
0, (u32) -1, true, false);
}
- ifmgd->probe_send_count++;
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(ifmgd, ifmgd->probe_timeout);
if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5f827a6..847215b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -153,7 +153,7 @@
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
if (ieee80211_is_data_qos(hdr->frame_control) &&
- *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
dur = 0;
else
/* Time needed to transmit ACK
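
Note: the old test OR-ed the QoS control byte with a non-zero mask, so the condition held for every QoS data frame and dur was always forced to 0. Only the bitwise AND actually tests the ack-policy bits:

	/* old:  *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK
	 *       -> non-zero for every QoS data frame, so dur was always 0
	 * new:  *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK
	 *       -> non-zero only when the NoAck ack policy is actually set
	 */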
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22f2216b..a44c680 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1371,6 +1371,12 @@
}
}
+ /* add back keys */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_enable_keys(sdata);
+
+ wake_up:
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1392,12 +1398,6 @@
mutex_unlock(&local->sta_mtx);
}
- /* add back keys */
- list_for_each_entry(sdata, &local->interfaces, list)
- if (ieee80211_sdata_running(sdata))
- ieee80211_enable_keys(sdata);
-
- wake_up:
ieee80211_wake_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8340ace..2cc7c1e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -836,7 +836,7 @@
#ifdef CONFIG_MODULES
if (res == NULL) {
genl_unlock();
- request_module("net-pf-%d-proto-%d-type-%s",
+ request_module("net-pf-%d-proto-%d-family-%s",
PF_NETLINK, NETLINK_GENERIC, name);
genl_lock();
res = genl_family_find_byname(name);
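
Note: this request_module() string has to stay in sync with the alias that MODULE_ALIAS_GENL_FAMILY() generates (the drop_monitor and l2tp_netlink hunks above switch to that macro). Assuming the macro expands as defined in include/net/genetlink.h, the alias for the NET_DM family would look roughly like:

	/* net-pf-16-proto-16-family-NET_DM   (PF_NETLINK = 16, NETLINK_GENERIC = 16) */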
diff --git a/net/rds/ib.h b/net/rds/ib.h
index edfaaaf..8d2b3d5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -186,8 +186,7 @@
struct work_struct free_work;
};
-#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
-#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 61ceae0..a157a2e 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -3,7 +3,7 @@
#
config WAN_ROUTER
- tristate "WAN router"
+ tristate "WAN router (DEPRECATED)"
depends on EXPERIMENTAL
---help---
Wide Area Networks (WANs), such as X.25, frame relay and leased
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c53e8f4..ccfbd32 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1921,6 +1921,9 @@
}
ok:
xfrm_pols_put(pols, drop_pols);
+ if (dst && dst->xfrm &&
+ dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+ dst->flags |= DST_XFRM_TUNNEL;
return dst;
nopol:
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 9985349..5548282 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1434,8 +1434,11 @@
fail:
free_token(token);
fail_expect:
- if (field)
+ if (field) {
+ free(field->type);
+ free(field->name);
free(field);
+ }
return -1;
}
@@ -1712,6 +1715,8 @@
if (set_op_prio(arg) == -1) {
event->flags |= EVENT_FL_FAILED;
+ /* arg->op.op (= token) will be freed at out_free */
+ arg->op.op = NULL;
goto out_free;
}
@@ -2124,6 +2129,13 @@
free_token(token);
type = process_arg(event, arg, &token);
+
+ if (type == EVENT_OP)
+ type = process_op(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ goto out_free;
+
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
@@ -2288,17 +2300,18 @@
arg = alloc_arg();
type = process_arg(event, arg, &token);
if (type == EVENT_ERROR)
- goto out_free;
+ goto out_free_arg;
if (!test_type_token(type, token, EVENT_OP, "]"))
- goto out_free;
+ goto out_free_arg;
free_token(token);
type = read_token_item(tok);
return type;
+ out_free_arg:
+ free_arg(arg);
out_free:
- free(arg);
free_token(token);
*tok = NULL;
return EVENT_ERROR;
@@ -3362,6 +3375,7 @@
break;
}
farg = farg->next;
+ param = param->next;
}
ret = (*func_handle->func)(s, args);
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 2d40c5e..dfcfe2c1 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -325,9 +325,8 @@
}
static struct filter_arg *
-create_arg_item(struct event_format *event,
- const char *token, enum filter_arg_type type,
- char **error_str)
+create_arg_item(struct event_format *event, const char *token,
+ enum event_type type, char **error_str)
{
struct format_field *field;
struct filter_arg *arg;
@@ -1585,7 +1584,7 @@
const char *name;
name = get_comm(event, record);
- return (unsigned long long)name;
+ return (unsigned long)name;
}
pevent_read_number_field(field, record->data, &val);
diff --git a/tools/perf/Documentation/perfconfig.example b/tools/perf/Documentation/perfconfig.example
index 42c6fd2..767ea24 100644
--- a/tools/perf/Documentation/perfconfig.example
+++ b/tools/perf/Documentation/perfconfig.example
@@ -19,3 +19,11 @@
# Default, disable using /dev/null
dir = /root/.debug
+
+[annotate]
+
+ # Defaults
+ hide_src_code = false
+ use_offset = true
+ jump_arrows = true
+ show_nr_jumps = false
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1d3d513..0eee64c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -80,7 +80,7 @@
PERF_DEBUG = $(DEBUG)
endif
ifndef PERF_DEBUG
- CFLAGS_OPTIMIZE = -O6
+ CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
endif
ifdef PARSER_DEBUG
@@ -89,7 +89,7 @@
PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
endif
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 806e0a2..67522cf 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -215,7 +215,7 @@
}
if (total_nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", session->filename);
+ ui__error("The %s file has no samples!\n", session->filename);
goto out_delete;
}
out_delete:
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e52d77e..acd78dc 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -116,7 +116,7 @@
int cmd_evlist(int argc, const char **argv, const char *prefix __used)
{
struct perf_attr_details details = { .verbose = false, };
- const char *input_name;
+ const char *input_name = NULL;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"Input file name"),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index e5cb084..f95840d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -264,7 +264,7 @@
}
if (err == ENOENT) {
- ui__warning("The %s event is not supported.\n",
+ ui__error("The %s event is not supported.\n",
event_name(pos));
exit(EXIT_FAILURE);
}
@@ -858,8 +858,8 @@
usage_with_options(record_usage, record_options);
if (rec->force && rec->append_file) {
- fprintf(stderr, "Can't overwrite and append at the same time."
- " You need to choose between -f and -A");
+ ui__error("Can't overwrite and append at the same time."
+ " You need to choose between -f and -A");
usage_with_options(record_usage, record_options);
} else if (rec->append_file) {
rec->write_mode = WRITE_APPEND;
@@ -868,8 +868,8 @@
}
if (nr_cgroups && !rec->opts.target.system_wide) {
- fprintf(stderr, "cgroup monitoring only available in"
- " system-wide mode\n");
+ ui__error("cgroup monitoring only available in"
+ " system-wide mode\n");
usage_with_options(record_usage, record_options);
}
@@ -905,7 +905,7 @@
int saved_errno = errno;
perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
- ui__warning("%s", errbuf);
+ ui__error("%s", errbuf);
err = -saved_errno;
goto out_free_fd;
@@ -933,7 +933,7 @@
else if (rec->opts.freq) {
rec->opts.default_interval = rec->opts.freq;
} else {
- fprintf(stderr, "frequency and count are zero, aborting\n");
+ ui__error("frequency and count are zero, aborting\n");
err = -EINVAL;
goto out_free_fd;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index d58e414..8c767c6 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -251,13 +251,13 @@
if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
- ui__warning("Selected --sort parent, but no "
+ ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
"'perf record' without -g?\n");
return -EINVAL;
}
if (symbol_conf.use_callchain) {
- ui__warning("Selected -g but no callchain data. Did "
+ ui__error("Selected -g but no callchain data. Did "
"you call 'perf record' without -g?\n");
return -1;
}
@@ -266,17 +266,15 @@
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
- ui__warning("Can't register callchain "
- "params.\n");
+ ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
if (sort__branch_mode == 1) {
if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
- fprintf(stderr, "selected -b but no branch data."
- " Did you call perf record without"
- " -b?\n");
+ ui__error("Selected -b but no branch data. "
+ "Did you call perf record without -b?\n");
return -1;
}
}
@@ -420,7 +418,7 @@
}
if (nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", session->filename);
+ ui__error("The %s file has no samples!\n", session->filename);
goto out_delete;
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6031dce..871b540 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -953,22 +953,22 @@
attr->config = PERF_COUNT_SW_CPU_CLOCK;
if (counter->name) {
free(counter->name);
- counter->name = strdup(event_name(counter));
+ counter->name = NULL;
}
goto try_again;
}
if (err == ENOENT) {
- ui__warning("The %s event is not supported.\n",
+ ui__error("The %s event is not supported.\n",
event_name(counter));
goto out_err;
} else if (err == EMFILE) {
- ui__warning("Too many events are opened.\n"
+ ui__error("Too many events are opened.\n"
"Try again after reducing the number of events\n");
goto out_err;
}
- ui__warning("The sys_perf_event_open() syscall "
+ ui__error("The sys_perf_event_open() syscall "
"returned with %d (%s). /bin/dmesg "
"may provide additional information.\n"
"No CONFIG_PERF_EVENTS=y kernel support "
@@ -978,7 +978,7 @@
}
if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
- ui__warning("Failed to mmap with %d (%s)\n",
+ ui__error("Failed to mmap with %d (%s)\n",
errno, strerror(errno));
goto out_err;
}
@@ -994,12 +994,12 @@
{
if (!top->sort_has_symbols) {
if (symbol_conf.use_callchain) {
- ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
+ ui__error("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
if (callchain_register_param(&callchain_param) < 0) {
- ui__warning("Can't register callchain params.\n");
+ ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
@@ -1041,7 +1041,7 @@
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
- printf("Could not create display thread.\n");
+ ui__error("Could not create display thread.\n");
exit(-1);
}
@@ -1050,7 +1050,7 @@
param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
- printf("Could not set realtime priority.\n");
+ ui__error("Could not set realtime priority.\n");
exit(-1);
}
}
@@ -1274,7 +1274,7 @@
int saved_errno = errno;
perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
- ui__warning("%s", errbuf);
+ ui__error("%s", errbuf);
status = -saved_errno;
goto out_delete_evlist;
@@ -1288,7 +1288,7 @@
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
- pr_err("Not enough memory for event selector list\n");
+ ui__error("Not enough memory for event selector list\n");
return -ENOMEM;
}
@@ -1305,7 +1305,7 @@
else if (top.freq) {
top.default_interval = top.freq;
} else {
- fprintf(stderr, "frequency and count are zero, aborting\n");
+ ui__error("frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 14f1034..f960ccb 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -227,7 +227,7 @@
unsigned int freq;
unsigned int mmap_pages;
unsigned int user_freq;
- int branch_stack;
+ u64 branch_stack;
u64 default_interval;
u64 user_interval;
};
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index cde4d0f..1818a53 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -35,16 +35,16 @@
return ret;
}
-void ui_browser__set_percent_color(struct ui_browser *self,
+void ui_browser__set_percent_color(struct ui_browser *browser,
double percent, bool current)
{
- int color = ui_browser__percent_color(self, percent, current);
- ui_browser__set_color(self, color);
+ int color = ui_browser__percent_color(browser, percent, current);
+ ui_browser__set_color(browser, color);
}
-void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
{
- SLsmg_gotorc(self->y + y, self->x + x);
+ SLsmg_gotorc(browser->y + y, browser->x + x);
}
static struct list_head *
@@ -73,23 +73,23 @@
return NULL;
}
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence)
{
- struct list_head *head = self->entries;
+ struct list_head *head = browser->entries;
struct list_head *pos;
- if (self->nr_entries == 0)
+ if (browser->nr_entries == 0)
return;
switch (whence) {
case SEEK_SET:
- pos = ui_browser__list_head_filter_entries(self, head->next);
+ pos = ui_browser__list_head_filter_entries(browser, head->next);
break;
case SEEK_CUR:
- pos = self->top;
+ pos = browser->top;
break;
case SEEK_END:
- pos = ui_browser__list_head_filter_prev_entries(self, head->prev);
+ pos = ui_browser__list_head_filter_prev_entries(browser, head->prev);
break;
default:
return;
@@ -99,18 +99,18 @@
if (offset > 0) {
while (offset-- != 0)
- pos = ui_browser__list_head_filter_entries(self, pos->next);
+ pos = ui_browser__list_head_filter_entries(browser, pos->next);
} else {
while (offset++ != 0)
- pos = ui_browser__list_head_filter_prev_entries(self, pos->prev);
+ pos = ui_browser__list_head_filter_prev_entries(browser, pos->prev);
}
- self->top = pos;
+ browser->top = pos;
}
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence)
{
- struct rb_root *root = self->entries;
+ struct rb_root *root = browser->entries;
struct rb_node *nd;
switch (whence) {
@@ -118,7 +118,7 @@
nd = rb_first(root);
break;
case SEEK_CUR:
- nd = self->top;
+ nd = browser->top;
break;
case SEEK_END:
nd = rb_last(root);
@@ -135,23 +135,23 @@
nd = rb_prev(nd);
}
- self->top = nd;
+ browser->top = nd;
}
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
{
struct rb_node *nd;
int row = 0;
- if (self->top == NULL)
- self->top = rb_first(self->entries);
+ if (browser->top == NULL)
+ browser->top = rb_first(browser->entries);
- nd = self->top;
+ nd = browser->top;
while (nd != NULL) {
- ui_browser__gotorc(self, row, 0);
- self->write(self, nd, row);
- if (++row == self->height)
+ ui_browser__gotorc(browser, row, 0);
+ browser->write(browser, nd, row);
+ if (++row == browser->height)
break;
nd = rb_next(nd);
}
@@ -159,17 +159,17 @@
return row;
}
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
{
- return self->top_idx + row == self->index;
+ return browser->top_idx + row == browser->index;
}
-void ui_browser__refresh_dimensions(struct ui_browser *self)
+void ui_browser__refresh_dimensions(struct ui_browser *browser)
{
- self->width = SLtt_Screen_Cols - 1;
- self->height = SLtt_Screen_Rows - 2;
- self->y = 1;
- self->x = 0;
+ browser->width = SLtt_Screen_Cols - 1;
+ browser->height = SLtt_Screen_Rows - 2;
+ browser->y = 1;
+ browser->x = 0;
}
void ui_browser__handle_resize(struct ui_browser *browser)
@@ -225,10 +225,10 @@
return key == K_ENTER || toupper(key) == 'Y';
}
-void ui_browser__reset_index(struct ui_browser *self)
+void ui_browser__reset_index(struct ui_browser *browser)
{
- self->index = self->top_idx = 0;
- self->seek(self, 0, SEEK_SET);
+ browser->index = browser->top_idx = 0;
+ browser->seek(browser, 0, SEEK_SET);
}
void __ui_browser__show_title(struct ui_browser *browser, const char *title)
@@ -245,26 +245,26 @@
pthread_mutex_unlock(&ui__lock);
}
-int ui_browser__show(struct ui_browser *self, const char *title,
+int ui_browser__show(struct ui_browser *browser, const char *title,
const char *helpline, ...)
{
int err;
va_list ap;
- ui_browser__refresh_dimensions(self);
+ ui_browser__refresh_dimensions(browser);
pthread_mutex_lock(&ui__lock);
- __ui_browser__show_title(self, title);
+ __ui_browser__show_title(browser, title);
- self->title = title;
- free(self->helpline);
- self->helpline = NULL;
+ browser->title = title;
+ free(browser->helpline);
+ browser->helpline = NULL;
va_start(ap, helpline);
- err = vasprintf(&self->helpline, helpline, ap);
+ err = vasprintf(&browser->helpline, helpline, ap);
va_end(ap);
if (err > 0)
- ui_helpline__push(self->helpline);
+ ui_helpline__push(browser->helpline);
pthread_mutex_unlock(&ui__lock);
return err ? 0 : -1;
}
@@ -350,7 +350,7 @@
browser->seek(browser, browser->top_idx, SEEK_SET);
}
-int ui_browser__run(struct ui_browser *self, int delay_secs)
+int ui_browser__run(struct ui_browser *browser, int delay_secs)
{
int err, key;
@@ -358,7 +358,7 @@
off_t offset;
pthread_mutex_lock(&ui__lock);
- err = __ui_browser__refresh(self);
+ err = __ui_browser__refresh(browser);
SLsmg_refresh();
pthread_mutex_unlock(&ui__lock);
if (err < 0)
@@ -368,18 +368,18 @@
if (key == K_RESIZE) {
ui__refresh_dimensions(false);
- ui_browser__refresh_dimensions(self);
- __ui_browser__show_title(self, self->title);
- ui_helpline__puts(self->helpline);
+ ui_browser__refresh_dimensions(browser);
+ __ui_browser__show_title(browser, browser->title);
+ ui_helpline__puts(browser->helpline);
continue;
}
- if (self->use_navkeypressed && !self->navkeypressed) {
+ if (browser->use_navkeypressed && !browser->navkeypressed) {
if (key == K_DOWN || key == K_UP ||
key == K_PGDN || key == K_PGUP ||
key == K_HOME || key == K_END ||
key == ' ') {
- self->navkeypressed = true;
+ browser->navkeypressed = true;
continue;
} else
return key;
@@ -387,59 +387,59 @@
switch (key) {
case K_DOWN:
- if (self->index == self->nr_entries - 1)
+ if (browser->index == browser->nr_entries - 1)
break;
- ++self->index;
- if (self->index == self->top_idx + self->height) {
- ++self->top_idx;
- self->seek(self, +1, SEEK_CUR);
+ ++browser->index;
+ if (browser->index == browser->top_idx + browser->height) {
+ ++browser->top_idx;
+ browser->seek(browser, +1, SEEK_CUR);
}
break;
case K_UP:
- if (self->index == 0)
+ if (browser->index == 0)
break;
- --self->index;
- if (self->index < self->top_idx) {
- --self->top_idx;
- self->seek(self, -1, SEEK_CUR);
+ --browser->index;
+ if (browser->index < browser->top_idx) {
+ --browser->top_idx;
+ browser->seek(browser, -1, SEEK_CUR);
}
break;
case K_PGDN:
case ' ':
- if (self->top_idx + self->height > self->nr_entries - 1)
+ if (browser->top_idx + browser->height > browser->nr_entries - 1)
break;
- offset = self->height;
- if (self->index + offset > self->nr_entries - 1)
- offset = self->nr_entries - 1 - self->index;
- self->index += offset;
- self->top_idx += offset;
- self->seek(self, +offset, SEEK_CUR);
+ offset = browser->height;
+ if (browser->index + offset > browser->nr_entries - 1)
+ offset = browser->nr_entries - 1 - browser->index;
+ browser->index += offset;
+ browser->top_idx += offset;
+ browser->seek(browser, +offset, SEEK_CUR);
break;
case K_PGUP:
- if (self->top_idx == 0)
+ if (browser->top_idx == 0)
break;
- if (self->top_idx < self->height)
- offset = self->top_idx;
+ if (browser->top_idx < browser->height)
+ offset = browser->top_idx;
else
- offset = self->height;
+ offset = browser->height;
- self->index -= offset;
- self->top_idx -= offset;
- self->seek(self, -offset, SEEK_CUR);
+ browser->index -= offset;
+ browser->top_idx -= offset;
+ browser->seek(browser, -offset, SEEK_CUR);
break;
case K_HOME:
- ui_browser__reset_index(self);
+ ui_browser__reset_index(browser);
break;
case K_END:
- offset = self->height - 1;
- if (offset >= self->nr_entries)
- offset = self->nr_entries - 1;
+ offset = browser->height - 1;
+ if (offset >= browser->nr_entries)
+ offset = browser->nr_entries - 1;
- self->index = self->nr_entries - 1;
- self->top_idx = self->index - offset;
- self->seek(self, -offset, SEEK_END);
+ browser->index = browser->nr_entries - 1;
+ browser->top_idx = browser->index - offset;
+ browser->seek(browser, -offset, SEEK_END);
break;
default:
return key;
@@ -448,22 +448,22 @@
return -1;
}
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
{
struct list_head *pos;
- struct list_head *head = self->entries;
+ struct list_head *head = browser->entries;
int row = 0;
- if (self->top == NULL || self->top == self->entries)
- self->top = ui_browser__list_head_filter_entries(self, head->next);
+ if (browser->top == NULL || browser->top == browser->entries)
+ browser->top = ui_browser__list_head_filter_entries(browser, head->next);
- pos = self->top;
+ pos = browser->top;
list_for_each_from(pos, head) {
- if (!self->filter || !self->filter(self, pos)) {
- ui_browser__gotorc(self, row, 0);
- self->write(self, pos, row);
- if (++row == self->height)
+ if (!browser->filter || !browser->filter(browser, pos)) {
+ ui_browser__gotorc(browser, row, 0);
+ browser->write(browser, pos, row);
+ if (++row == browser->height)
break;
}
}
@@ -708,4 +708,6 @@
struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
sltt_set_color(c->colorset, c->name, c->fg, c->bg);
}
+
+ annotate_browser__init();
}
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index dd96d82..af70314 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -69,4 +69,5 @@
unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
void ui_browser__init(void);
+void annotate_browser__init(void);
#endif /* _PERF_UI_BROWSER_H_ */
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 6e0ef79..4deea6a 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -19,6 +19,16 @@
int jump_sources;
};
+static struct annotate_browser_opt {
+ bool hide_src_code,
+ use_offset,
+ jump_arrows,
+ show_nr_jumps;
+} annotate_browser__opts = {
+ .use_offset = true,
+ .jump_arrows = true,
+};
+
struct annotate_browser {
struct ui_browser b;
struct rb_root entries;
@@ -30,10 +40,6 @@
int nr_entries;
int max_jump_sources;
int nr_jumps;
- bool hide_src_code;
- bool use_offset;
- bool jump_arrows;
- bool show_nr_jumps;
bool searching_backwards;
u8 addr_width;
u8 jumps_width;
@@ -48,11 +54,9 @@
return (struct browser_disasm_line *)(dl + 1);
}
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
{
- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
-
- if (ab->hide_src_code) {
+ if (annotate_browser__opts.hide_src_code) {
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
return dl->offset == -1;
}
@@ -79,30 +83,30 @@
return ui_browser__set_color(&browser->b, color);
}
-static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
{
- struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
+ struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
struct browser_disasm_line *bdl = disasm_line__browser(dl);
- bool current_entry = ui_browser__is_current_entry(self, row);
- bool change_color = (!ab->hide_src_code &&
- (!current_entry || (self->use_navkeypressed &&
- !self->navkeypressed)));
- int width = self->width, printed;
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+ bool change_color = (!annotate_browser__opts.hide_src_code &&
+ (!current_entry || (browser->use_navkeypressed &&
+ !browser->navkeypressed)));
+ int width = browser->width, printed;
char bf[256];
if (dl->offset != -1 && bdl->percent != 0.0) {
- ui_browser__set_percent_color(self, bdl->percent, current_entry);
+ ui_browser__set_percent_color(browser, bdl->percent, current_entry);
slsmg_printf("%6.2f ", bdl->percent);
} else {
- ui_browser__set_percent_color(self, 0, current_entry);
+ ui_browser__set_percent_color(browser, 0, current_entry);
slsmg_write_nstring(" ", 7);
}
SLsmg_write_char(' ');
/* The scroll bar isn't being used */
- if (!self->navkeypressed)
+ if (!browser->navkeypressed)
width += 1;
if (!*dl->line)
@@ -116,14 +120,14 @@
u64 addr = dl->offset;
int color = -1;
- if (!ab->use_offset)
+ if (!annotate_browser__opts.use_offset)
addr += ab->start;
- if (!ab->use_offset) {
+ if (!annotate_browser__opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
if (bdl->jump_sources) {
- if (ab->show_nr_jumps) {
+ if (annotate_browser__opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
ab->jumps_width,
@@ -131,7 +135,7 @@
prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
current_entry);
slsmg_write_nstring(bf, printed);
- ui_browser__set_color(self, prev);
+ ui_browser__set_color(browser, prev);
}
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
@@ -143,19 +147,19 @@
}
if (change_color)
- color = ui_browser__set_color(self, HE_COLORSET_ADDR);
+ color = ui_browser__set_color(browser, HE_COLORSET_ADDR);
slsmg_write_nstring(bf, printed);
if (change_color)
- ui_browser__set_color(self, color);
+ ui_browser__set_color(browser, color);
if (dl->ins && dl->ins->ops->scnprintf) {
if (ins__is_jump(dl->ins)) {
bool fwd = dl->ops.target.offset > (u64)dl->offset;
- ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
+ ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
SLSMG_UARROW_CHAR);
SLsmg_write_char(' ');
} else if (ins__is_call(dl->ins)) {
- ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
+ ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
SLsmg_write_char(' ');
} else {
slsmg_write_nstring(" ", 2);
@@ -164,12 +168,12 @@
if (strcmp(dl->name, "retq")) {
slsmg_write_nstring(" ", 2);
} else {
- ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
+ ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
SLsmg_write_char(' ');
}
}
- disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
+ disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
slsmg_write_nstring(bf, width - 10 - printed);
}
@@ -184,7 +188,7 @@
struct browser_disasm_line *btarget, *bcursor;
unsigned int from, to;
- if (!cursor->ins || !ins__is_jump(cursor->ins) ||
+ if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) ||
!disasm_line__has_offset(cursor))
return;
@@ -195,7 +199,7 @@
bcursor = disasm_line__browser(cursor);
btarget = disasm_line__browser(target);
- if (ab->hide_src_code) {
+ if (annotate_browser__opts.hide_src_code) {
from = bcursor->idx_asm;
to = btarget->idx_asm;
} else {
@@ -209,10 +213,9 @@
static unsigned int annotate_browser__refresh(struct ui_browser *browser)
{
- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
int ret = ui_browser__list_head_refresh(browser);
- if (ab->jump_arrows)
+ if (annotate_browser__opts.jump_arrows)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -272,27 +275,27 @@
rb_insert_color(&bdl->rb_node, root);
}
-static void annotate_browser__set_top(struct annotate_browser *self,
+static void annotate_browser__set_top(struct annotate_browser *browser,
struct disasm_line *pos, u32 idx)
{
unsigned back;
- ui_browser__refresh_dimensions(&self->b);
- back = self->b.height / 2;
- self->b.top_idx = self->b.index = idx;
+ ui_browser__refresh_dimensions(&browser->b);
+ back = browser->b.height / 2;
+ browser->b.top_idx = browser->b.index = idx;
- while (self->b.top_idx != 0 && back != 0) {
+ while (browser->b.top_idx != 0 && back != 0) {
pos = list_entry(pos->node.prev, struct disasm_line, node);
- if (disasm_line__filter(&self->b, &pos->node))
+ if (disasm_line__filter(&browser->b, &pos->node))
continue;
- --self->b.top_idx;
+ --browser->b.top_idx;
--back;
}
- self->b.top = pos;
- self->b.navkeypressed = true;
+ browser->b.top = pos;
+ browser->b.navkeypressed = true;
}
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
@@ -300,10 +303,14 @@
{
struct browser_disasm_line *bpos;
struct disasm_line *pos;
+ u32 idx;
bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
pos = ((struct disasm_line *)bpos) - 1;
- annotate_browser__set_top(browser, pos, bpos->idx);
+ idx = bpos->idx;
+ if (annotate_browser__opts.hide_src_code)
+ idx = bpos->idx_asm;
+ annotate_browser__set_top(browser, pos, idx);
browser->curr_hot = nd;
}
@@ -343,12 +350,12 @@
dl = list_entry(browser->b.top, struct disasm_line, node);
bdl = disasm_line__browser(dl);
- if (browser->hide_src_code) {
+ if (annotate_browser__opts.hide_src_code) {
if (bdl->idx_asm < offset)
offset = bdl->idx;
browser->b.nr_entries = browser->nr_entries;
- browser->hide_src_code = false;
+ annotate_browser__opts.hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = bdl->idx - offset;
browser->b.index = bdl->idx;
@@ -363,7 +370,7 @@
offset = bdl->idx_asm;
browser->b.nr_entries = browser->nr_asm_entries;
- browser->hide_src_code = true;
+ annotate_browser__opts.hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = bdl->idx_asm - offset;
browser->b.index = bdl->idx_asm;
@@ -372,6 +379,12 @@
return true;
}
+static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
+{
+ ui_browser__reset_index(&browser->b);
+ browser->b.nr_entries = browser->nr_asm_entries;
+}
+
static bool annotate_browser__callq(struct annotate_browser *browser,
int evidx, void (*timer)(void *arg),
void *arg, int delay_secs)
@@ -574,33 +587,46 @@
return __annotate_browser__search_reverse(browser);
}
-static int annotate_browser__run(struct annotate_browser *self, int evidx,
+static void annotate_browser__update_addr_width(struct annotate_browser *browser)
+{
+ if (annotate_browser__opts.use_offset)
+ browser->target_width = browser->min_addr_width;
+ else
+ browser->target_width = browser->max_addr_width;
+
+ browser->addr_width = browser->target_width;
+
+ if (annotate_browser__opts.show_nr_jumps)
+ browser->addr_width += browser->jumps_width + 1;
+}
+
+static int annotate_browser__run(struct annotate_browser *browser, int evidx,
void(*timer)(void *arg),
void *arg, int delay_secs)
{
struct rb_node *nd = NULL;
- struct map_symbol *ms = self->b.priv;
+ struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
const char *help = "Press 'h' for help on key bindings";
int key;
- if (ui_browser__show(&self->b, sym->name, help) < 0)
+ if (ui_browser__show(&browser->b, sym->name, help) < 0)
return -1;
- annotate_browser__calc_percent(self, evidx);
+ annotate_browser__calc_percent(browser, evidx);
- if (self->curr_hot) {
- annotate_browser__set_rb_top(self, self->curr_hot);
- self->b.navkeypressed = false;
+ if (browser->curr_hot) {
+ annotate_browser__set_rb_top(browser, browser->curr_hot);
+ browser->b.navkeypressed = false;
}
- nd = self->curr_hot;
+ nd = browser->curr_hot;
while (1) {
- key = ui_browser__run(&self->b, delay_secs);
+ key = ui_browser__run(&browser->b, delay_secs);
if (delay_secs != 0) {
- annotate_browser__calc_percent(self, evidx);
+ annotate_browser__calc_percent(browser, evidx);
/*
* Current line focus got out of the list of most active
* lines, NULL it so that if TAB|UNTAB is pressed, we
@@ -622,21 +648,21 @@
if (nd != NULL) {
nd = rb_prev(nd);
if (nd == NULL)
- nd = rb_last(&self->entries);
+ nd = rb_last(&browser->entries);
} else
- nd = self->curr_hot;
+ nd = browser->curr_hot;
break;
case K_UNTAB:
if (nd != NULL)
nd = rb_next(nd);
if (nd == NULL)
- nd = rb_first(&self->entries);
+ nd = rb_first(&browser->entries);
else
- nd = self->curr_hot;
+ nd = browser->curr_hot;
break;
case K_F1:
case 'h':
- ui_browser__help_window(&self->b,
+ ui_browser__help_window(&browser->b,
"UP/DOWN/PGUP\n"
"PGDN/SPACE Navigate\n"
"q/ESC/CTRL+C Exit\n\n"
@@ -652,57 +678,62 @@
"? Search previous string\n");
continue;
case 'H':
- nd = self->curr_hot;
+ nd = browser->curr_hot;
break;
case 's':
- if (annotate_browser__toggle_source(self))
+ if (annotate_browser__toggle_source(browser))
ui_helpline__puts(help);
continue;
case 'o':
- self->use_offset = !self->use_offset;
- if (self->use_offset)
- self->target_width = self->min_addr_width;
- else
- self->target_width = self->max_addr_width;
-update_addr_width:
- self->addr_width = self->target_width;
- if (self->show_nr_jumps)
- self->addr_width += self->jumps_width + 1;
+ annotate_browser__opts.use_offset = !annotate_browser__opts.use_offset;
+ annotate_browser__update_addr_width(browser);
continue;
case 'j':
- self->jump_arrows = !self->jump_arrows;
+ annotate_browser__opts.jump_arrows = !annotate_browser__opts.jump_arrows;
continue;
case 'J':
- self->show_nr_jumps = !self->show_nr_jumps;
- goto update_addr_width;
+ annotate_browser__opts.show_nr_jumps = !annotate_browser__opts.show_nr_jumps;
+ annotate_browser__update_addr_width(browser);
+ continue;
case '/':
- if (annotate_browser__search(self, delay_secs)) {
+ if (annotate_browser__search(browser, delay_secs)) {
show_help:
ui_helpline__puts(help);
}
continue;
case 'n':
- if (self->searching_backwards ?
- annotate_browser__continue_search_reverse(self, delay_secs) :
- annotate_browser__continue_search(self, delay_secs))
+ if (browser->searching_backwards ?
+ annotate_browser__continue_search_reverse(browser, delay_secs) :
+ annotate_browser__continue_search(browser, delay_secs))
goto show_help;
continue;
case '?':
- if (annotate_browser__search_reverse(self, delay_secs))
+ if (annotate_browser__search_reverse(browser, delay_secs))
goto show_help;
continue;
+ case 'D': {
+ static int seq;
+ ui_helpline__pop();
+ ui_helpline__fpush("%d: nr_ent=%d, height=%d, idx=%d, top_idx=%d, nr_asm_entries=%d",
+ seq++, browser->b.nr_entries,
+ browser->b.height,
+ browser->b.index,
+ browser->b.top_idx,
+ browser->nr_asm_entries);
+ }
+ continue;
case K_ENTER:
case K_RIGHT:
- if (self->selection == NULL)
+ if (browser->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
- else if (self->selection->offset == -1)
+ else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
- else if (!self->selection->ins) {
- if (strcmp(self->selection->name, "retq"))
+ else if (!browser->selection->ins) {
+ if (strcmp(browser->selection->name, "retq"))
goto show_sup_ins;
goto out;
- } else if (!(annotate_browser__jump(self) ||
- annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
+ } else if (!(annotate_browser__jump(browser) ||
+ annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) {
show_sup_ins:
ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
}
@@ -717,10 +748,10 @@
}
if (nd != NULL)
- annotate_browser__set_rb_top(self, nd);
+ annotate_browser__set_rb_top(browser, nd);
}
out:
- ui_browser__hide(&self->b);
+ ui_browser__hide(&browser->b);
return key;
}
@@ -797,8 +828,6 @@
.priv = &ms,
.use_navkeypressed = true,
},
- .use_offset = true,
- .jump_arrows = true,
};
int ret = -1;
@@ -855,6 +884,12 @@
browser.b.nr_entries = browser.nr_entries;
browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
+
+ if (annotate_browser__opts.hide_src_code)
+ annotate_browser__init_asm_mode(&browser);
+
+ annotate_browser__update_addr_width(&browser);
+
ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
@@ -865,3 +900,52 @@
free(browser.offsets);
return ret;
}
+
+#define ANNOTATE_CFG(n) \
+ { .name = #n, .value = &annotate_browser__opts.n, }
+
+/*
+ * Keep the entries sorted, they are bsearch'ed
+ */
+static struct annotate__config {
+ const char *name;
+ bool *value;
+} annotate__configs[] = {
+ ANNOTATE_CFG(hide_src_code),
+ ANNOTATE_CFG(jump_arrows),
+ ANNOTATE_CFG(show_nr_jumps),
+ ANNOTATE_CFG(use_offset),
+};
+
+#undef ANNOTATE_CFG
+
+static int annotate_config__cmp(const void *name, const void *cfgp)
+{
+ const struct annotate__config *cfg = cfgp;
+
+ return strcmp(name, cfg->name);
+}
+
+static int annotate__config(const char *var, const char *value, void *data __used)
+{
+ struct annotate__config *cfg;
+ const char *name;
+
+ if (prefixcmp(var, "annotate.") != 0)
+ return 0;
+
+ name = var + 9;
+ cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
+ sizeof(struct annotate__config), annotate_config__cmp);
+
+ if (cfg == NULL)
+ return -1;
+
+ *cfg->value = perf_config_bool(name, value);
+ return 0;
+}
+
+void annotate_browser__init(void)
+{
+ perf_config(annotate__config, NULL);
+}
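The hunk above hooks the annotate browser options into perf's config machinery: annotate_browser__init() registers annotate__config() with perf_config(), and for every variable starting with "annotate." the remainder of the name is resolved with bsearch() against annotate__configs[], which is why the table must stay sorted (hence the comment). A minimal standalone sketch of that sorted-table lookup follows; the option names mirror the ones above, but the program itself is illustrative and not part of the perf sources.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdbool.h>

	struct opt {
		const char *name;
		bool *value;
	};

	static bool hide_src_code, jump_arrows, show_nr_jumps, use_offset;

	/* Keep sorted by name: bsearch() below relies on it. */
	static struct opt opts[] = {
		{ "hide_src_code", &hide_src_code },
		{ "jump_arrows",   &jump_arrows   },
		{ "show_nr_jumps", &show_nr_jumps },
		{ "use_offset",    &use_offset    },
	};

	static int opt_cmp(const void *name, const void *entry)
	{
		return strcmp((const char *)name,
			      ((const struct opt *)entry)->name);
	}

	static int set_opt(const char *name, bool value)
	{
		struct opt *o = bsearch(name, opts, sizeof(opts) / sizeof(opts[0]),
					sizeof(opts[0]), opt_cmp);
		if (o == NULL)
			return -1;	/* unknown option */
		*o->value = value;
		return 0;
	}

	int main(void)
	{
		set_opt("use_offset", false);	/* found: clears the flag */
		printf("use_offset=%d unknown=%d\n",
		       use_offset, set_opt("no_such_option", true));
		return 0;
	}

In the perf sources the values come from the config file, where "annotate.use_offset" style variables correspond to an [annotate] section; together with the iskeychar() change to tools/perf/util/config.c further down, keys containing '_' such as hide_src_code can now be read as well.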
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a372a4b..53f6697 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -26,21 +26,21 @@
bool has_symbols;
};
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name);
-static void hist_browser__refresh_dimensions(struct hist_browser *self)
+static void hist_browser__refresh_dimensions(struct hist_browser *browser)
{
/* 3 == +/- toggle symbol before actual hist_entry rendering */
- self->b.width = 3 + (hists__sort_list_width(self->hists) +
+ browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
sizeof("[k]"));
}
-static void hist_browser__reset(struct hist_browser *self)
+static void hist_browser__reset(struct hist_browser *browser)
{
- self->b.nr_entries = self->hists->nr_entries;
- hist_browser__refresh_dimensions(self);
- ui_browser__reset_index(&self->b);
+ browser->b.nr_entries = browser->hists->nr_entries;
+ hist_browser__refresh_dimensions(browser);
+ ui_browser__reset_index(&browser->b);
}
static char tree__folded_sign(bool unfolded)
@@ -48,32 +48,32 @@
return unfolded ? '-' : '+';
}
-static char map_symbol__folded(const struct map_symbol *self)
+static char map_symbol__folded(const struct map_symbol *ms)
{
- return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+ return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
}
-static char hist_entry__folded(const struct hist_entry *self)
+static char hist_entry__folded(const struct hist_entry *he)
{
- return map_symbol__folded(&self->ms);
+ return map_symbol__folded(&he->ms);
}
-static char callchain_list__folded(const struct callchain_list *self)
+static char callchain_list__folded(const struct callchain_list *cl)
{
- return map_symbol__folded(&self->ms);
+ return map_symbol__folded(&cl->ms);
}
-static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
{
- self->unfolded = unfold ? self->has_children : false;
+ ms->unfolded = unfold ? ms->has_children : false;
}
-static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
{
int n = 0;
struct rb_node *nd;
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
char folded_sign = ' '; /* No children */
@@ -123,23 +123,23 @@
return n;
}
-static bool map_symbol__toggle_fold(struct map_symbol *self)
+static bool map_symbol__toggle_fold(struct map_symbol *ms)
{
- if (!self)
+ if (!ms)
return false;
- if (!self->has_children)
+ if (!ms->has_children)
return false;
- self->unfolded = !self->unfolded;
+ ms->unfolded = !ms->unfolded;
return true;
}
-static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
{
- struct rb_node *nd = rb_first(&self->rb_root);
+ struct rb_node *nd = rb_first(&node->rb_root);
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
bool first = true;
@@ -158,49 +158,49 @@
}
}
-static void callchain_node__init_have_children(struct callchain_node *self)
+static void callchain_node__init_have_children(struct callchain_node *node)
{
struct callchain_list *chain;
- list_for_each_entry(chain, &self->val, list)
- chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
+ list_for_each_entry(chain, &node->val, list)
+ chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
- callchain_node__init_have_children_rb_tree(self);
+ callchain_node__init_have_children_rb_tree(node);
}
-static void callchain__init_have_children(struct rb_root *self)
+static void callchain__init_have_children(struct rb_root *root)
{
struct rb_node *nd;
- for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__init_have_children(node);
}
}
-static void hist_entry__init_have_children(struct hist_entry *self)
+static void hist_entry__init_have_children(struct hist_entry *he)
{
- if (!self->init_have_children) {
- self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
- callchain__init_have_children(&self->sorted_chain);
- self->init_have_children = true;
+ if (!he->init_have_children) {
+ he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
+ callchain__init_have_children(&he->sorted_chain);
+ he->init_have_children = true;
}
}
-static bool hist_browser__toggle_fold(struct hist_browser *self)
+static bool hist_browser__toggle_fold(struct hist_browser *browser)
{
- if (map_symbol__toggle_fold(self->selection)) {
- struct hist_entry *he = self->he_selection;
+ if (map_symbol__toggle_fold(browser->selection)) {
+ struct hist_entry *he = browser->he_selection;
hist_entry__init_have_children(he);
- self->hists->nr_entries -= he->nr_rows;
+ browser->hists->nr_entries -= he->nr_rows;
if (he->ms.unfolded)
he->nr_rows = callchain__count_rows(&he->sorted_chain);
else
he->nr_rows = 0;
- self->hists->nr_entries += he->nr_rows;
- self->b.nr_entries = self->hists->nr_entries;
+ browser->hists->nr_entries += he->nr_rows;
+ browser->b.nr_entries = browser->hists->nr_entries;
return true;
}
@@ -209,12 +209,12 @@
return false;
}
-static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
{
int n = 0;
struct rb_node *nd;
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
bool has_children = false;
@@ -263,37 +263,37 @@
return n;
}
-static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
{
- hist_entry__init_have_children(self);
- map_symbol__set_folding(&self->ms, unfold);
+ hist_entry__init_have_children(he);
+ map_symbol__set_folding(&he->ms, unfold);
- if (self->ms.has_children) {
- int n = callchain__set_folding(&self->sorted_chain, unfold);
- self->nr_rows = unfold ? n : 0;
+ if (he->ms.has_children) {
+ int n = callchain__set_folding(&he->sorted_chain, unfold);
+ he->nr_rows = unfold ? n : 0;
} else
- self->nr_rows = 0;
+ he->nr_rows = 0;
}
-static void hists__set_folding(struct hists *self, bool unfold)
+static void hists__set_folding(struct hists *hists, bool unfold)
{
struct rb_node *nd;
- self->nr_entries = 0;
+ hists->nr_entries = 0;
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
hist_entry__set_folding(he, unfold);
- self->nr_entries += 1 + he->nr_rows;
+ hists->nr_entries += 1 + he->nr_rows;
}
}
-static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
- hists__set_folding(self->hists, unfold);
- self->b.nr_entries = self->hists->nr_entries;
+ hists__set_folding(browser->hists, unfold);
+ browser->b.nr_entries = browser->hists->nr_entries;
/* Go to the start, we may be way after valid entries after a collapse */
- ui_browser__reset_index(&self->b);
+ ui_browser__reset_index(&browser->b);
}
static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -305,64 +305,64 @@
"Or reduce the sampling frequency.");
}
-static int hist_browser__run(struct hist_browser *self, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
void(*timer)(void *arg), void *arg, int delay_secs)
{
int key;
char title[160];
- self->b.entries = &self->hists->entries;
- self->b.nr_entries = self->hists->nr_entries;
+ browser->b.entries = &browser->hists->entries;
+ browser->b.nr_entries = browser->hists->nr_entries;
- hist_browser__refresh_dimensions(self);
- hists__browser_title(self->hists, title, sizeof(title), ev_name);
+ hist_browser__refresh_dimensions(browser);
+ hists__browser_title(browser->hists, title, sizeof(title), ev_name);
- if (ui_browser__show(&self->b, title,
+ if (ui_browser__show(&browser->b, title,
"Press '?' for help on key bindings") < 0)
return -1;
while (1) {
- key = ui_browser__run(&self->b, delay_secs);
+ key = ui_browser__run(&browser->b, delay_secs);
switch (key) {
case K_TIMER:
timer(arg);
- ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
+ ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
- if (self->hists->stats.nr_lost_warned !=
- self->hists->stats.nr_events[PERF_RECORD_LOST]) {
- self->hists->stats.nr_lost_warned =
- self->hists->stats.nr_events[PERF_RECORD_LOST];
- ui_browser__warn_lost_events(&self->b);
+ if (browser->hists->stats.nr_lost_warned !=
+ browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+ browser->hists->stats.nr_lost_warned =
+ browser->hists->stats.nr_events[PERF_RECORD_LOST];
+ ui_browser__warn_lost_events(&browser->b);
}
- hists__browser_title(self->hists, title, sizeof(title), ev_name);
- ui_browser__show_title(&self->b, title);
+ hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+ ui_browser__show_title(&browser->b, title);
continue;
case 'D': { /* Debug */
static int seq;
- struct hist_entry *h = rb_entry(self->b.top,
+ struct hist_entry *h = rb_entry(browser->b.top,
struct hist_entry, rb_node);
ui_helpline__pop();
ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
- seq++, self->b.nr_entries,
- self->hists->nr_entries,
- self->b.height,
- self->b.index,
- self->b.top_idx,
+ seq++, browser->b.nr_entries,
+ browser->hists->nr_entries,
+ browser->b.height,
+ browser->b.index,
+ browser->b.top_idx,
h->row_offset, h->nr_rows);
}
break;
case 'C':
/* Collapse the whole world. */
- hist_browser__set_folding(self, false);
+ hist_browser__set_folding(browser, false);
break;
case 'E':
/* Expand the whole world. */
- hist_browser__set_folding(self, true);
+ hist_browser__set_folding(browser, true);
break;
case K_ENTER:
- if (hist_browser__toggle_fold(self))
+ if (hist_browser__toggle_fold(browser))
break;
/* fall thru */
default:
@@ -370,23 +370,23 @@
}
}
out:
- ui_browser__hide(&self->b);
+ ui_browser__hide(&browser->b);
return key;
}
-static char *callchain_list__sym_name(struct callchain_list *self,
+static char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize)
{
- if (self->ms.sym)
- return self->ms.sym->name;
+ if (cl->ms.sym)
+ return cl->ms.sym->name;
- snprintf(bf, bfsize, "%#" PRIx64, self->ip);
+ snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
return bf;
}
#define LEVEL_OFFSET_STEP 3
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
struct callchain_node *chain_node,
u64 total, int level,
unsigned short row,
@@ -444,21 +444,21 @@
}
color = HE_COLORSET_NORMAL;
- width = self->b.width - (offset + extra_offset + 2);
- if (ui_browser__is_current_entry(&self->b, row)) {
- self->selection = &chain->ms;
+ width = browser->b.width - (offset + extra_offset + 2);
+ if (ui_browser__is_current_entry(&browser->b, row)) {
+ browser->selection = &chain->ms;
color = HE_COLORSET_SELECTED;
*is_current_entry = true;
}
- ui_browser__set_color(&self->b, color);
- ui_browser__gotorc(&self->b, row, 0);
+ ui_browser__set_color(&browser->b, color);
+ ui_browser__gotorc(&browser->b, row, 0);
slsmg_write_nstring(" ", offset + extra_offset);
slsmg_printf("%c ", folded_sign);
slsmg_write_nstring(str, width);
free(alloc_str);
- if (++row == self->b.height)
+ if (++row == browser->b.height)
goto out;
do_next:
if (folded_sign == '+')
@@ -467,11 +467,11 @@
if (folded_sign == '-') {
const int new_level = level + (extra_offset ? 2 : 1);
- row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+ row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
new_level, row, row_offset,
is_current_entry);
}
- if (row == self->b.height)
+ if (row == browser->b.height)
goto out;
node = next;
}
@@ -479,7 +479,7 @@
return row - first_row;
}
-static int hist_browser__show_callchain_node(struct hist_browser *self,
+static int hist_browser__show_callchain_node(struct hist_browser *browser,
struct callchain_node *node,
int level, unsigned short row,
off_t *row_offset,
@@ -488,7 +488,7 @@
struct callchain_list *chain;
int first_row = row,
offset = level * LEVEL_OFFSET_STEP,
- width = self->b.width - offset;
+ width = browser->b.width - offset;
char folded_sign = ' ';
list_for_each_entry(chain, &node->val, list) {
@@ -503,26 +503,26 @@
}
color = HE_COLORSET_NORMAL;
- if (ui_browser__is_current_entry(&self->b, row)) {
- self->selection = &chain->ms;
+ if (ui_browser__is_current_entry(&browser->b, row)) {
+ browser->selection = &chain->ms;
color = HE_COLORSET_SELECTED;
*is_current_entry = true;
}
s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
- ui_browser__gotorc(&self->b, row, 0);
- ui_browser__set_color(&self->b, color);
+ ui_browser__gotorc(&browser->b, row, 0);
+ ui_browser__set_color(&browser->b, color);
slsmg_write_nstring(" ", offset);
slsmg_printf("%c ", folded_sign);
slsmg_write_nstring(s, width - 2);
- if (++row == self->b.height)
+ if (++row == browser->b.height)
goto out;
}
if (folded_sign == '-')
- row += hist_browser__show_callchain_node_rb_tree(self, node,
- self->hists->stats.total_period,
+ row += hist_browser__show_callchain_node_rb_tree(browser, node,
+ browser->hists->stats.total_period,
level + 1, row,
row_offset,
is_current_entry);
@@ -530,7 +530,7 @@
return row - first_row;
}
-static int hist_browser__show_callchain(struct hist_browser *self,
+static int hist_browser__show_callchain(struct hist_browser *browser,
struct rb_root *chain,
int level, unsigned short row,
off_t *row_offset,
@@ -542,31 +542,31 @@
for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
- row += hist_browser__show_callchain_node(self, node, level,
+ row += hist_browser__show_callchain_node(browser, node, level,
row, row_offset,
is_current_entry);
- if (row == self->b.height)
+ if (row == browser->b.height)
break;
}
return row - first_row;
}
-static int hist_browser__show_entry(struct hist_browser *self,
+static int hist_browser__show_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row)
{
char s[256];
double percent;
int printed = 0;
- int width = self->b.width - 6; /* The percentage */
+ int width = browser->b.width - 6; /* The percentage */
char folded_sign = ' ';
- bool current_entry = ui_browser__is_current_entry(&self->b, row);
+ bool current_entry = ui_browser__is_current_entry(&browser->b, row);
off_t row_offset = entry->row_offset;
if (current_entry) {
- self->he_selection = entry;
- self->selection = &entry->ms;
+ browser->he_selection = entry;
+ browser->selection = &entry->ms;
}
if (symbol_conf.use_callchain) {
@@ -575,11 +575,11 @@
}
if (row_offset == 0) {
- hist_entry__snprintf(entry, s, sizeof(s), self->hists);
- percent = (entry->period * 100.0) / self->hists->stats.total_period;
+ hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
+ percent = (entry->period * 100.0) / browser->hists->stats.total_period;
- ui_browser__set_percent_color(&self->b, percent, current_entry);
- ui_browser__gotorc(&self->b, row, 0);
+ ui_browser__set_percent_color(&browser->b, percent, current_entry);
+ ui_browser__gotorc(&browser->b, row, 0);
if (symbol_conf.use_callchain) {
slsmg_printf("%c ", folded_sign);
width -= 2;
@@ -588,11 +588,11 @@
slsmg_printf(" %5.2f%%", percent);
/* The scroll bar isn't being used */
- if (!self->b.navkeypressed)
+ if (!browser->b.navkeypressed)
width += 1;
- if (!current_entry || !self->b.navkeypressed)
- ui_browser__set_color(&self->b, HE_COLORSET_NORMAL);
+ if (!current_entry || !browser->b.navkeypressed)
+ ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
if (symbol_conf.show_nr_samples) {
slsmg_printf(" %11u", entry->nr_events);
@@ -610,12 +610,12 @@
} else
--row_offset;
- if (folded_sign == '-' && row != self->b.height) {
- printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+ if (folded_sign == '-' && row != browser->b.height) {
+ printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
1, row, &row_offset,
&current_entry);
if (current_entry)
- self->he_selection = entry;
+ browser->he_selection = entry;
}
return printed;
@@ -631,22 +631,22 @@
}
}
-static unsigned int hist_browser__refresh(struct ui_browser *self)
+static unsigned int hist_browser__refresh(struct ui_browser *browser)
{
unsigned row = 0;
struct rb_node *nd;
- struct hist_browser *hb = container_of(self, struct hist_browser, b);
+ struct hist_browser *hb = container_of(browser, struct hist_browser, b);
- ui_browser__hists_init_top(self);
+ ui_browser__hists_init_top(browser);
- for (nd = self->top; nd; nd = rb_next(nd)) {
+ for (nd = browser->top; nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (h->filtered)
continue;
row += hist_browser__show_entry(hb, h, row);
- if (row == self->height)
+ if (row == browser->height)
break;
}
@@ -679,27 +679,27 @@
return NULL;
}
-static void ui_browser__hists_seek(struct ui_browser *self,
+static void ui_browser__hists_seek(struct ui_browser *browser,
off_t offset, int whence)
{
struct hist_entry *h;
struct rb_node *nd;
bool first = true;
- if (self->nr_entries == 0)
+ if (browser->nr_entries == 0)
return;
- ui_browser__hists_init_top(self);
+ ui_browser__hists_init_top(browser);
switch (whence) {
case SEEK_SET:
- nd = hists__filter_entries(rb_first(self->entries));
+ nd = hists__filter_entries(rb_first(browser->entries));
break;
case SEEK_CUR:
- nd = self->top;
+ nd = browser->top;
goto do_offset;
case SEEK_END:
- nd = hists__filter_prev_entries(rb_last(self->entries));
+ nd = hists__filter_prev_entries(rb_last(browser->entries));
first = false;
break;
default:
@@ -710,7 +710,7 @@
* Moves not relative to the first visible entry invalidates its
* row_offset:
*/
- h = rb_entry(self->top, struct hist_entry, rb_node);
+ h = rb_entry(browser->top, struct hist_entry, rb_node);
h->row_offset = 0;
/*
@@ -738,7 +738,7 @@
} else {
h->row_offset += offset;
offset = 0;
- self->top = nd;
+ browser->top = nd;
break;
}
}
@@ -746,7 +746,7 @@
if (nd == NULL)
break;
--offset;
- self->top = nd;
+ browser->top = nd;
} while (offset != 0);
} else if (offset < 0) {
while (1) {
@@ -759,7 +759,7 @@
} else {
h->row_offset += offset;
offset = 0;
- self->top = nd;
+ browser->top = nd;
break;
}
} else {
@@ -769,7 +769,7 @@
} else {
h->row_offset = h->nr_rows + offset;
offset = 0;
- self->top = nd;
+ browser->top = nd;
break;
}
}
@@ -779,7 +779,7 @@
if (nd == NULL)
break;
++offset;
- self->top = nd;
+ browser->top = nd;
if (offset == 0) {
/*
* Last unfiltered hist_entry, check if it is
@@ -794,7 +794,7 @@
first = false;
}
} else {
- self->top = nd;
+ browser->top = nd;
h = rb_entry(nd, struct hist_entry, rb_node);
h->row_offset = 0;
}
@@ -802,46 +802,46 @@
static struct hist_browser *hist_browser__new(struct hists *hists)
{
- struct hist_browser *self = zalloc(sizeof(*self));
+ struct hist_browser *browser = zalloc(sizeof(*browser));
- if (self) {
- self->hists = hists;
- self->b.refresh = hist_browser__refresh;
- self->b.seek = ui_browser__hists_seek;
- self->b.use_navkeypressed = true;
+ if (browser) {
+ browser->hists = hists;
+ browser->b.refresh = hist_browser__refresh;
+ browser->b.seek = ui_browser__hists_seek;
+ browser->b.use_navkeypressed = true;
if (sort__branch_mode == 1)
- self->has_symbols = sort_sym_from.list.next != NULL;
+ browser->has_symbols = sort_sym_from.list.next != NULL;
else
- self->has_symbols = sort_sym.list.next != NULL;
+ browser->has_symbols = sort_sym.list.next != NULL;
}
- return self;
+ return browser;
}
-static void hist_browser__delete(struct hist_browser *self)
+static void hist_browser__delete(struct hist_browser *browser)
{
- free(self);
+ free(browser);
}
-static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
{
- return self->he_selection;
+ return browser->he_selection;
}
-static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
{
- return self->he_selection->thread;
+ return browser->he_selection->thread;
}
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name)
{
char unit;
int printed;
- const struct dso *dso = self->dso_filter;
- const struct thread *thread = self->thread_filter;
- unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
- u64 nr_events = self->stats.total_period;
+ const struct dso *dso = hists->dso_filter;
+ const struct thread *thread = hists->thread_filter;
+ unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ u64 nr_events = hists->stats.total_period;
nr_samples = convert_unit(nr_samples, &unit);
printed = scnprintf(bf, size,
@@ -849,9 +849,9 @@
nr_samples, unit, ev_name, nr_events);
- if (self->uid_filter_str)
+ if (hists->uid_filter_str)
printed += snprintf(bf + printed, size - printed,
- ", UID: %s", self->uid_filter_str);
+ ", UID: %s", hists->uid_filter_str);
if (thread)
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
@@ -879,8 +879,8 @@
void(*timer)(void *arg), void *arg,
int delay_secs)
{
- struct hists *self = &evsel->hists;
- struct hist_browser *browser = hist_browser__new(self);
+ struct hists *hists = &evsel->hists;
+ struct hist_browser *browser = hist_browser__new(hists);
struct branch_info *bi;
struct pstack *fstack;
char *options[16];
@@ -946,8 +946,8 @@
"Please enter the name of symbol you want to see",
buf, "ENTER: OK, ESC: Cancel",
delay_secs * 2) == K_ENTER) {
- self->symbol_filter_str = *buf ? buf : NULL;
- hists__filter_by_symbol(self);
+ hists->symbol_filter_str = *buf ? buf : NULL;
+ hists__filter_by_symbol(hists);
hist_browser__reset(browser);
}
continue;
@@ -1128,7 +1128,7 @@
sort_dso.elide = true;
pstack__push(fstack, &browser->hists->dso_filter);
}
- hists__filter_by_dso(self);
+ hists__filter_by_dso(hists);
hist_browser__reset(browser);
} else if (choice == zoom_thread) {
zoom_thread:
@@ -1146,7 +1146,7 @@
sort_thread.elide = true;
pstack__push(fstack, &browser->hists->thread_filter);
}
- hists__filter_by_thread(self);
+ hists__filter_by_thread(hists);
hist_browser__reset(browser);
}
}
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index 9f5f888..791fb15 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -22,6 +22,7 @@
break;
/* fall through */
default:
+ use_browser = 0;
if (fallback_to_pager)
setup_pager();
break;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0deac6a..6faa3a1 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -120,7 +120,7 @@
static inline int iskeychar(int c)
{
- return isalnum(c) || c == '-';
+ return isalnum(c) || c == '-' || c == '_';
}
static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
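Accepting '_' in iskeychar() matters for the annotate options added above in tools/perf/ui/browsers/annotate.c: keys such as annotate.hide_src_code or annotate.show_nr_jumps contain underscores and would otherwise trip up the config parser. A small sketch of the resulting character rule; key_chars_ok() is a hypothetical helper written only for illustration, not a function in config.c.

	#include <ctype.h>
	#include <stdbool.h>

	static inline int iskeychar(int c)
	{
		return isalnum(c) || c == '-' || c == '_';
	}

	/* Hypothetical: true when every character of the key part of a config
	 * variable (the text after the last '.') passes iskeychar(). */
	static bool key_chars_ok(const char *key)
	{
		for (; *key; key++)
			if (!iskeychar((unsigned char)*key))
				return false;
		return true;
	}

	int main(void)
	{
		return key_chars_ok("hide_src_code") ? 0 : 1;
	}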
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 57e4ce5..91d1913 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,6 +15,7 @@
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
+#include "../../include/linux/perf_event.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -64,6 +65,95 @@
return evsel;
}
+static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+ "cycles",
+ "instructions",
+ "cache-references",
+ "cache-misses",
+ "branches",
+ "branch-misses",
+ "bus-cycles",
+ "stalled-cycles-frontend",
+ "stalled-cycles-backend",
+ "ref-cycles",
+};
+
+const char *__perf_evsel__hw_name(u64 config)
+{
+ if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+ return perf_evsel__hw_names[config];
+
+ return "unknown-hardware";
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int colon = 0;
+ struct perf_event_attr *attr = &evsel->attr;
+ int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
+ bool exclude_guest_default = false;
+
+#define MOD_PRINT(context, mod) do { \
+ if (!attr->exclude_##context) { \
+ if (!colon) colon = r++; \
+ r += scnprintf(bf + r, size - r, "%c", mod); \
+ } } while(0)
+
+ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+ MOD_PRINT(kernel, 'k');
+ MOD_PRINT(user, 'u');
+ MOD_PRINT(hv, 'h');
+ exclude_guest_default = true;
+ }
+
+ if (attr->precise_ip) {
+ if (!colon)
+ colon = r++;
+ r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+ exclude_guest_default = true;
+ }
+
+ if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+ MOD_PRINT(host, 'H');
+ MOD_PRINT(guest, 'G');
+ }
+#undef MOD_PRINT
+ if (colon)
+ bf[colon] = ':';
+ return r;
+}
+
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret;
+
+ switch (evsel->attr.type) {
+ case PERF_TYPE_RAW:
+ ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+ break;
+
+ case PERF_TYPE_HARDWARE:
+ ret = perf_evsel__hw_name(evsel, bf, size);
+ break;
+ default:
+ /*
+ * FIXME
+ *
+ * This is the minimal perf_evsel__name so that we can
+ * reconstruct event names taking into account event modifiers.
+ *
+ * The old event_name uses it now for raw and hw events, so that
+ * we don't drag all the parsing stuff into the python binding.
+ *
+ * On the next devel cycle the rest of the event naming will be
+ * brought here.
+ */
+ return 0;
+ }
+
+ return ret;
+}
+
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
struct perf_evsel *first)
{
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3d6b3e4..4ba8b56 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,6 +83,9 @@
struct perf_record_opts *opts,
struct perf_evsel *first);
+const char* __perf_evsel__hw_name(u64 config);
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index fac7d59..05dbc8b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -62,19 +62,6 @@
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
- "cycles",
- "instructions",
- "cache-references",
- "cache-misses",
- "branches",
- "branch-misses",
- "bus-cycles",
- "stalled-cycles-frontend",
- "stalled-cycles-backend",
- "ref-cycles",
-};
-
static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
@@ -300,6 +287,16 @@
u64 config = evsel->attr.config;
int type = evsel->attr.type;
+ if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
+ /*
+ * XXX minimal fix, see comment on perf_evsel__name, this static buffer
+ * will go away together with event_name in the next devel cycle.
+ */
+ static char bf[128];
+ perf_evsel__name(evsel, bf, sizeof(bf));
+ return bf;
+ }
+
if (evsel->name)
return evsel->name;
@@ -317,9 +314,7 @@
switch (type) {
case PERF_TYPE_HARDWARE:
- if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
- return hw_event_names[config];
- return "unknown-hardware";
+ return __perf_evsel__hw_name(config);
case PERF_TYPE_HW_CACHE: {
u8 cache_type, cache_op, cache_result;
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 84d9bd78..9b5f856 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -188,28 +188,27 @@
nt = realloc(threads, (sizeof(*threads) +
sizeof(pid_t) * total_tasks));
if (nt == NULL)
- goto out_free_threads;
+ goto out_free_namelist;
threads = nt;
- if (threads) {
- for (i = 0; i < items; i++)
- threads->map[j++] = atoi(namelist[i]->d_name);
- threads->nr = total_tasks;
- }
-
- for (i = 0; i < items; i++)
+ for (i = 0; i < items; i++) {
+ threads->map[j++] = atoi(namelist[i]->d_name);
free(namelist[i]);
+ }
+ threads->nr = total_tasks;
free(namelist);
-
- if (!threads)
- break;
}
out:
strlist__delete(slist);
return threads;
+out_free_namelist:
+ for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
out_free_threads:
free(threads);
threads = NULL;