Merge branch 'hwmon-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jdelvare/staging

Pull hwmon fixes from Jean Delvare.

* 'hwmon-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jdelvare/staging:
  hwmon: (lm75) Fix tcn75 prefix
  hwmon: (lm75.h) Update header inclusion
  MAINTAINERS: Remove Mark M. Hoffman

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2c3bdce..13b7394 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,7 +49,6 @@
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
- select VIRT_TO_BUS
select KTIME_SCALAR
select PERF_USE_VMALLOC
select RTC_LIB
@@ -743,6 +742,7 @@
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select NO_IOPORT
+ select VIRT_TO_BUS
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -878,6 +878,7 @@
select ISA_DMA
select NEED_MACH_MEMORY_H
select PCI
+ select VIRT_TO_BUS
select ZONE_DMA
help
Support for the StrongARM based Digital DNARD machine, also known
@@ -1005,12 +1006,12 @@
bool
config ARCH_MULTI_V6
- bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
+ bool "ARMv6 based platforms (ARM11)"
select ARCH_MULTI_V6_V7
select CPU_V6
config ARCH_MULTI_V7
- bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
+ bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
default y
select ARCH_MULTI_V6_V7
select ARCH_VEXPRESS
@@ -1461,10 +1462,6 @@
bool
select ISA_DMA_API
-config ARCH_NO_VIRT_TO_BUS
- def_bool y
- depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
-
# Select ISA DMA interface
config ISA_DMA_API
bool
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e64..a98c0d5 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
nand {
pinctrl_nand: nand-0 {
atmel,pins =
- <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */
- 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */
+ <3 0 0x1 0x0 /* PD0 periph A Read Enable */
+ 3 1 0x1 0x0 /* PD1 periph A Write Enable */
+ 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
+ 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
+ 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
+ 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
+ 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
+ 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
+ 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
+ 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
+ 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
+ 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
+ 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
+ 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
+ };
+
+ pinctrl_nand_16bits: nand_16bits-0 {
+ atmel,pins =
+ <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
+ 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
+ 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
+ 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
+ 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
+ 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
+ 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
+ 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
};
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index e1347fc..1a62bcf 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -275,18 +275,27 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x12680000 0x1000>;
interrupts = <0 35 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
pdma1: pdma@12690000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x12690000 0x1000>;
interrupts = <0 36 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
mdma1: mdma@12850000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x12850000 0x1000>;
interrupts = <0 34 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
};
};
};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5f3562a..9a99755 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -142,12 +142,18 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x120000 0x1000>;
interrupts = <0 34 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
pdma1: pdma@121B0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x121000 0x1000>;
interrupts = <0 35 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
};
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 31644f1..79078ed 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -480,7 +480,7 @@
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 400;
+ evt->rating = 100;
evt->mult = 1;
evt->set_mode = broadcast_timer_set_mode;
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index d912e73..94b0650 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,31 +14,15 @@
.text
.align 5
- .word 0
-
-1: subs r2, r2, #4 @ 1 do we have enough
- blt 5f @ 1 bytes to align with?
- cmp r3, #2 @ 1
- strltb r1, [ip], #1 @ 1
- strleb r1, [ip], #1 @ 1
- strb r1, [ip], #1 @ 1
- add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
ENTRY(memset)
-/*
- * Preserve the contents of r0 for the return value.
- */
- mov ip, r0
- ands r3, ip, #3 @ 1 unaligned?
- bne 1b @ 1
+ ands r3, r0, #3 @ 1 unaligned?
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
/*
* we know that the pointer in ip is aligned to a word boundary.
*/
- orr r1, r1, r1, lsl #8
+1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@@ -127,4 +111,13 @@
tst r2, #1
strneb r1, [ip], #1
mov pc, lr
+
+6: subs r2, r2, #4 @ 1 do we have enough
+ blt 5b @ 1 bytes to align with?
+ cmp r3, #2 @ 1
+ strltb r1, [ip], #1 @ 1
+ strleb r1, [ip], #1 @ 1
+ strb r1, [ip], #1 @ 1
+ add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+ b 1b
ENDPROC(memset)
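
The memset change above relocates the unaligned-head handling to after the entry point: up to three leading bytes are stored individually, the count is adjusted, and control rejoins the aligned word-fill loop at label 1. A minimal C sketch of that strategy, purely illustrative and not part of the patch:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: store leading bytes until dst is word aligned, then an
 * aligned fill loop (32-bit stores in the assembly) handles the rest. */
void *memset_sketch(void *dst, int c, size_t n)
{
	uint8_t *p = dst;

	while (((uintptr_t)p & 3) && n) {	/* at most 3 unaligned bytes */
		*p++ = (uint8_t)c;
		n--;
	}
	while (n--)				/* stands in for the word loop */
		*p++ = (uint8_t)c;
	return dst;				/* original pointer preserved, like r0/ip */
}
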
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465a..5fc2377 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@
extern void at91_gpio_suspend(void);
extern void at91_gpio_resume(void);
+#ifdef CONFIG_PINCTRL_AT91
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+#else
+static inline void at91_pinctrl_gpio_suspend(void) {}
+static inline void at91_pinctrl_gpio_resume(void) {}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e21026..e0ca591 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@
void at91_irq_suspend(void)
{
- int i = 0, bit;
+ int bit = -1;
if (has_aic5()) {
/* disable enabled irqs */
- while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+ while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IDCR, 1);
- i = bit;
}
/* enable wakeup irqs */
- i = 0;
- while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+ bit = -1;
+ while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IECR, 1);
- i = bit;
}
} else {
at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@
void at91_irq_resume(void)
{
- int i = 0, bit;
+ int bit = -1;
if (has_aic5()) {
/* disable wakeup irqs */
- while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+ while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IDCR, 1);
- i = bit;
}
/* enable irqs disabled for suspend */
- i = 0;
- while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+ bit = -1;
+ while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IECR, 1);
- i = bit;
}
} else {
at91_aic_write(AT91_AIC_IDCR, *wakeups);
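
The loop rewrite above matters because find_next_bit(addr, size, offset) scans from offset inclusive: restarting the search at the bit just found returns the same index again, so advancing from bit + 1 is what lets the walk terminate. A self-contained sketch of the idiom, with a simplified single-word stand-in for the kernel helper:

#include <stdio.h>

/* Simplified single-word stand-in for the kernel's find_next_bit(): returns
 * the first set bit at or after 'offset', or 'size' when none remain. */
static int find_next_bit(const unsigned long *addr, int size, int offset)
{
	for (int i = offset; i < size; i++)
		if (*addr & (1UL << i))
			return i;
	return size;
}

int main(void)
{
	unsigned long mask = 0x15;	/* bits 0, 2 and 4 set */
	int nbits = 8 * sizeof(mask);
	int bit = -1;

	/* Passing bit + 1 keeps the scan advancing; passing bit back in, as
	 * the old code effectively did, would re-find the same bit forever. */
	while ((bit = find_next_bit(&mask, nbits, bit + 1)) < nbits)
		printf("bit %d is set\n", bit);
	return 0;
}
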
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db8..73f1f25 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@
static int at91_pm_enter(suspend_state_t state)
{
- at91_gpio_suspend();
+ if (of_have_populated_dt())
+ at91_pinctrl_gpio_suspend();
+ else
+ at91_gpio_suspend();
at91_irq_suspend();
pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@
error:
target_state = PM_SUSPEND_ON;
at91_irq_resume();
- at91_gpio_resume();
+ if (of_have_populated_dt())
+ at91_pinctrl_gpio_resume();
+ else
+ at91_gpio_resume();
return 0;
}
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index a685e97..45b7c71 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -743,6 +743,9 @@
*/
int edma_alloc_slot(unsigned ctlr, int slot)
{
+ if (!edma_cc[ctlr])
+ return -EINVAL;
+
if (slot >= 0)
slot = EDMA_CHAN_SLOT(slot);
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index abda5a1..0f2111a 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -67,6 +67,7 @@
select ISA
select ISA_DMA
select PCI
+ select VIRT_TO_BUS
help
Say Y here if you intend to run this kernel on the Rebel.COM
NetWinder. Information about this machine can be found at:
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 74e3a34..e13a8fa 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -264,6 +264,7 @@
clk_prepare_enable(clk[gpio3_gate]);
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
+ clk_prepare_enable(clk[max_gate]);
/*
* SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
index 03b65e5..8234839 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -27,6 +27,11 @@
NULL
};
+static void __init imx25_timer_init(void)
+{
+ mx25_clocks_init_dt();
+}
+
DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
.map_io = mx25_map_io,
.init_early = imx25_init_early,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d1e2d59..f62b68d 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -9,6 +9,7 @@
*/
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index fcdf52d..f051f53 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -214,11 +214,6 @@
.name = "pcmcdclk",
};
-static struct clk dummy_apb_pclk = {
- .name = "apb_pclk",
- .id = -1,
-};
-
static struct clk *clkset_vpllsrc_list[] = {
[0] = &clk_fin_vpll,
[1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@
static struct clk init_clocks_off[] = {
{
- .name = "dma",
- .devname = "dma-pl330.0",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 3),
- }, {
- .name = "dma",
- .devname = "dma-pl330.1",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 4),
- }, {
.name = "rot",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@
.ctrlbit = (1<<19),
};
+static struct clk clk_pdma0 = {
+ .name = "pdma0",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 3),
+};
+
+static struct clk clk_pdma1 = {
+ .name = "pdma1",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 4),
+};
+
static struct clk *clkset_uart_list[] = {
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@
&clk_hsmmc1,
&clk_hsmmc2,
&clk_hsmmc3,
+ &clk_pdma0,
+ &clk_pdma1,
};
/* Clock initialisation code */
@@ -1333,6 +1332,8 @@
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
+ CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
};
void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@
for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
s3c_disable_clocks(clk_cdev[ptr], 1);
- s3c24xx_register_clock(&dummy_apb_pclk);
s3c_pwmclk_init();
}
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 3a38f7b..e373de4 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -845,7 +845,7 @@
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_BUS_TYPE_ITU_601,
+ .fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
.board_info = &noon010pc30_board_info,
.i2c_bus_num = 0,
.clk_frequency = 16000000UL,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cdcb799..fec49eb 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -32,6 +32,7 @@
#include <linux/smsc911x.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_hspi.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/usb/otg.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8082151..ea5bb04 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@
config PPC
bool
default y
+ select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a..b59e06f 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@
/*
* VSID allocation (256MB segment)
*
- * We first generate a 38-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID | 1 << 37, for user addresses it is:
- * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
*
- * This splits the proto-VSID into the below range
- * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
- * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
- *
- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
- * That is, we assign half of the space to user processes and half
- * to the kernel.
+ * For user processes max context id is limited to ((1ul << 19) - 5)
+ * for kernel space, we use the top 4 context ids to map address as below
+ * NOTE: each context only support 64TB now.
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
@@ -363,41 +362,49 @@
* VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
*
- * This scheme has several advantages over older methods:
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * bad address. This enables us to consolidate bad address handling in
+ * hash_page.
*
- * - We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- * - We allow for USER_ESID_BITS significant bits of ESID and
- * CONTEXT_BITS bits of context for user addresses.
- * i.e. 64T (46 bits) of address space for up to half a million contexts.
- *
- * - The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
*/
+#define CONTEXT_BITS 19
+#define ESID_BITS 18
+#define ESID_BITS_1T 6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
+
/*
* This should be computed such that protovsid * vsid_multiplier
* doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
*/
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_256M 38
+#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T 26
+#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
-#define CONTEXT_BITS 19
-#define USER_ESID_BITS 18
-#define USER_ESID_BITS_1T 6
-#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
/*
* This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@
srdi rx,rt,VSID_BITS_##size; \
clrldi rt,rt,(64-VSID_BITS_##size); \
add rt,rt,rx; /* add high and low bits */ \
- /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
+ /* NOTE: explanation based on VSID_BITS_##size = 36 \
+ * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
* 2^36-1+2^28-1. That in particular means that if r3 >= \
* 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
* the bit clear, r3 already has the answer we want, if it \
@@ -513,34 +521,6 @@
})
#endif /* 1 */
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
- unsigned long proto_vsid;
- /*
- * We need to make sure proto_vsid for the kernel is
- * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
- */
- if (ssize == MMU_SEGSIZE_256M) {
- proto_vsid = ea >> SID_SHIFT;
- proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
- return vsid_scramble(proto_vsid, 256M);
- }
- proto_vsid = ea >> SID_SHIFT_1T;
- proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
- return vsid_scramble(proto_vsid, 1T);
-}
-
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
@@ -550,17 +530,41 @@
return MMU_SEGSIZE_256M;
}
-/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
+ /*
+ * Bad address. We return VSID 0 for that
+ */
+ if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+ return 0;
+
if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble((context << USER_ESID_BITS)
+ return vsid_scramble((context << ESID_BITS)
| (ea >> SID_SHIFT), 256M);
- return vsid_scramble((context << USER_ESID_BITS_1T)
+ return vsid_scramble((context << ESID_BITS_1T)
| (ea >> SID_SHIFT_1T), 1T);
}
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+ unsigned long context;
+
+ /*
+ * kernel take the top 4 context from the available range
+ */
+ context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+ return get_vsid(context, ea, ssize);
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
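
With the new layout, a proto-VSID is (context << ESID_BITS) | esid, CONTEXT_BITS + ESID_BITS = 37 bits wide, and the scramble multiplies by a 24-bit prime and reduces modulo 2^37 - 1. Because the modulus is 2^n - 1, the reduction folds the high bits back onto the low bits instead of dividing, which is what ASM_VSID_SCRAMBLE does in assembly. A rough user-space sketch of the 256MB-segment case, illustrative only:

#include <stdint.h>
#include <stdio.h>

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)	/* 37 */
#define VSID_MULTIPLIER_256M	12538073ULL			/* 24-bit prime */
#define VSID_MODULUS_256M	((1ULL << VSID_BITS_256M) - 1)

/* proto-VSID = (context << ESID_BITS) | esid, scrambled modulo 2^37 - 1. */
static uint64_t vsid_scramble_256m(uint64_t context, uint64_t esid)
{
	uint64_t x = ((context << ESID_BITS) | esid) * VSID_MULTIPLIER_256M;

	/* Fold the high bits onto the low bits: x mod (2^37 - 1) without a
	 * divide; one conditional subtract suffices since x fits in 61 bits. */
	x = (x >> VSID_BITS_256M) + (x & VSID_MODULUS_256M);
	if (x >= VSID_MODULUS_256M)
		x -= VSID_MODULUS_256M;
	return x;
}

int main(void)
{
	/* Kernel region 0xc maps to context MAX_USER_CONTEXT + 1 = 0x7fffc. */
	printf("vsid = %#llx\n",
	       (unsigned long long)vsid_scramble_256m(0x7fffc, 0));
	return 0;
}
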
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71..19599ef 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 87ef8f5..200afa5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1452,20 +1452,36 @@
_GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+ mfspr r11,SPRN_DAR /* ea */
+ /*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+ rldicr. r9,r11,4,(63 - 46 - 4)
+ li r9,0 /* VSID = 0 for bad address */
+ bne- 0f
+
+ /*
+ * Calculate VSID:
+ * This is the kernel vsid, we take the top for context from
+ * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * Here we know that (ea >> 60) == 0xc
+ */
+ lis r9,(MAX_USER_CONTEXT + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+ srdi r10,r11,SID_SHIFT
+ rldimi r10,r9,ESID_BITS,0 /* proto vsid */
+ ASM_VSID_SCRAMBLE(r10, r9, 256M)
+ rldic r9,r10,12,16 /* r9 = vsid << 12 */
+
+0:
/* Hash to the primary group */
ld r10,PACASTABVIRT(r13)
- mfspr r11,SPRN_DAR
- srdi r11,r11,28
+ srdi r11,r11,SID_SHIFT
rldimi r10,r11,7,52 /* r10 = first ste of the group */
- /* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID | 1 << 37 */
- li r9,0x1
- rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
- ASM_VSID_SCRAMBLE(r11, r9, 256M)
- rldic r9,r11,12,16 /* r9 = vsid << 12 */
-
/* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */
andi. r11,r11,0x80
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7f..13f8d16 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@
{
}
#else
-static void __reloc_toc(void *tocstart, unsigned long offset,
- unsigned long nr_entries)
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
unsigned long i;
- unsigned long *toc_entry = (unsigned long *)tocstart;
+ unsigned long *toc_entry;
+
+ /* Get the start of the TOC by using r2 directly. */
+ asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
for (i = 0; i < nr_entries; i++) {
*toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@
unsigned long nr_entries =
(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
- /* Need to add offset to get at __prom_init_toc_start */
- __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
+ __reloc_toc(offset, nr_entries);
mb();
}
@@ -2864,8 +2865,7 @@
mb();
- /* __prom_init_toc_start has been relocated, no need to add offset */
- __reloc_toc(__prom_init_toc_start, -offset, nr_entries);
+ __reloc_toc(-offset, nr_entries);
}
#endif
#endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6..f9b30c6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@
brk.address = bp_info->addr & ~7UL;
brk.type = HW_BRK_TYPE_TRANSLATE;
+ brk.len = 8;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e3..5d7d29a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@
vcpu3s->context_id[0] = err;
vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
- << USER_ESID_BITS) - 1;
- vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+ << ESID_BITS) - 1;
+ vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e127..f410c3e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
+ /*
+ * If we hit a bad address return error.
+ */
+ if (!vsid)
+ return -1;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@
/* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
+ else
+ stab_initialize(get_paca()->stab_real);
}
#ifdef CONFIG_SMP
@@ -922,11 +929,6 @@
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
- if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
- DBG_LOW(" out of pgtable range !\n");
- return 1;
- }
-
/* Get region & vsid */
switch (REGION_ID(ea)) {
case USER_REGION_ID:
@@ -957,6 +959,11 @@
}
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
+ /* Bad address. */
+ if (!vsid) {
+ DBG_LOW("Bad address!\n");
+ return 1;
+ }
/* Get pgdir */
pgdir = mm->pgd;
if (pgdir == NULL)
@@ -1126,6 +1133,8 @@
/* Get VSID */
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
+ if (!vsid)
+ return;
/* Hash doesn't like irqs */
local_irq_save(flags);
@@ -1233,6 +1242,9 @@
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+ /* Don't create HPTE entries for bad address */
+ if (!vsid)
+ return;
ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0..d1d1b92 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
-
int __init_new_context(void)
{
int index;
@@ -56,7 +47,7 @@
else if (err)
return err;
- if (index > MAX_CONTEXT) {
+ if (index > MAX_USER_CONTEXT) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a27..654258f 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
#endif
#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca2..17aa6df 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
* No other registers are examined or changed.
*/
_GLOBAL(slb_allocate_realmode)
- /* r3 = faulting address */
+ /*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+ rldicr. r9,r3,4,(63 - 46 - 4)
+ bne- 8f
srdi r9,r3,60 /* get region */
- srdi r10,r3,28 /* get esid */
+ srdi r10,r3,SID_SHIFT /* get esid */
cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
- li r9,0x1
/*
- * for 1T we shift 12 bits more. slb_finish_load_1T will do
- * the necessary adjustment
+ * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * r9 = region id.
*/
- rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+ addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@
_GLOBAL(slb_miss_kernel_load_io)
li r11,0
6:
- li r9,0x1
/*
- * for 1T we shift 12 bits more. slb_finish_load_1T will do
- * the necessary adjustment
+ * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * r9 = region id.
*/
- rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+ addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-0: /* user address: proto-VSID = context << 15 | ESID. First check
- * if the address is within the boundaries of the user region
- */
- srdi. r9,r10,USER_ESID_BITS
- bne- 8f /* invalid ea bits set */
-
-
+0:
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
@@ -164,15 +166,13 @@
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- rldimi r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
bge slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
li r10,0 /* BAD_VSID */
+ li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load
@@ -221,8 +221,6 @@
/* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13)
- rldimi r10,r9,USER_ESID_BITS,0
-
/* fall through slb_finish_load */
#endif /* __DISABLED__ */
@@ -231,9 +229,10 @@
/*
* Finish loading of an SLB entry and return
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
+ rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M)
/*
* bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@
/*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
*/
slb_finish_load_1T:
- srdi r10,r10,40-28 /* get 1T ESID */
+ srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
+ rldimi r10,r9,ESID_BITS_1T,0
ASM_VSID_SCRAMBLE(r10,r9,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef5..023ec8a 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
vsid = get_vsid(mm->context.id, addr, ssize);
- WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
+ WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 611e92f..7179726 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -69,7 +69,7 @@
return IRQ_HANDLED;
};
-static int __devinit gpio_halt_probe(struct platform_device *pdev)
+static int gpio_halt_probe(struct platform_device *pdev)
{
enum of_gpio_flags flags;
struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@
return 0;
}
-static int __devexit gpio_halt_remove(struct platform_device *pdev)
+static int gpio_halt_remove(struct platform_device *pdev)
{
if (halt_node) {
int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
- .remove = __devexit_p(gpio_halt_remove),
+ .remove = gpio_halt_remove,
};
module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index cea2f09..18e3b76 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -124,9 +124,8 @@
select PPC_HAVE_PMU_SUPPORT
config POWER3
- bool
depends on PPC64 && PPC_BOOK3S
- default y if !POWER4_ONLY
+ def_bool y
config POWER4
depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@
but somewhat slower on other machines. This option only changes
the scheduling of instructions, not the selection of instructions
itself, so the resulting kernel will keep running on all other
- machines. When building a kernel that is supposed to run only
- on Cell, you should also select the POWER4_ONLY option.
+ machines.
# this is temp to handle compat with arch=ppc
config 8xx
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 8d48471..dc9200c 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -34,6 +34,8 @@
u32 reserved[4];
} __packed;
+#define EQC_WR_PROHIBIT 22
+
struct msb {
u8 fmt:4;
u8 oc:4;
@@ -96,11 +98,13 @@
#define OP_STATE_TEMP_ERR 2
#define OP_STATE_PERM_ERR 3
+enum scm_event {SCM_CHANGE, SCM_AVAIL};
+
struct scm_driver {
struct device_driver drv;
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
- void (*notify) (struct scm_device *scmdev);
+ void (*notify) (struct scm_device *scmdev, enum scm_event event);
void (*handler) (struct scm_device *scmdev, void *data, int error);
};
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8fe2b..6b32af3 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -74,8 +74,6 @@
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
- if (unlikely(cpumask_empty(mm_cpumask(mm))))
- return;
/*
* If the machine has IDTE we prefer to do a per mm flush
* on all cpus instead of doing a local flush if the mm
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5502285..94feff7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -636,7 +636,8 @@
UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
mcck_skip:
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
- mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+ stm %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
stm %r8,%r9,__PT_PSW(%r11)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
l %r1,BASED(.Ldo_machine_check)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c837c1..2e6d60c 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -678,8 +678,9 @@
UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
LAST_BREAK %r14
mcck_skip:
- lghi %r14,__LC_GPREGS_SAVE_AREA
- mvc __PT_R0(128,%r11),0(%r14)
+ lghi %r14,__LC_GPREGS_SAVE_AREA+64
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de..2926885 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -571,6 +571,8 @@
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+ /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
+ tmp = SECTION_ALIGN_UP(tmp);
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 093c435..1f44e56 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -158,7 +158,7 @@
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
static int tegra_ahb_suspend(struct device *dev)
{
int i;
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index b5538bb..09c6331 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -157,7 +157,7 @@
divisor = parent_rate / rate;
/* If prate / rate would be decimal, incr the divisor */
- if (rate * divisor < *prate)
+ if (rate * divisor < parent_rate)
divisor++;
if (divisor == cdev->div_mask + 1)
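
The round_rate fix above boils down to the usual round-up-divisor idiom: derive the divisor from the parent rate and bump it whenever the integer division truncated, so the delivered rate never exceeds the request. A small standalone sketch with made-up rates:

#include <stdio.h>

/* Pick a divisor so that parent_rate / divisor never exceeds the requested
 * rate: start from the truncating division and bump on any remainder. */
static unsigned long pick_divisor(unsigned long parent_rate, unsigned long rate)
{
	unsigned long divisor = parent_rate / rate;

	if (rate * divisor < parent_rate)	/* division truncated */
		divisor++;
	return divisor;
}

int main(void)
{
	/* 100 MHz parent, 30 MHz requested: 100/30 truncates to 3, which would
	 * overshoot (33.3 MHz), so the divisor becomes 4 and 25 MHz is used. */
	printf("divisor = %lu\n", pick_divisor(100000000UL, 30000000UL));
	return 0;
}
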
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 46cde098..e380c6e 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -4,7 +4,6 @@
menuconfig I2C
tristate "I2C support"
- depends on !S390
select RT_MUTEXES
---help---
I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
@@ -76,6 +75,7 @@
config I2C_SMBUS
tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
+ depends on GENERIC_HARDIRQS
help
Say Y here if you want support for SMBus extensions to the I2C
specification. At the moment, the only supported extension is
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a3725de..adfee98 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -114,7 +114,7 @@
config I2C_ISCH
tristate "Intel SCH SMBus 1.0"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select LPC_SCH
help
Say Y here if you want to use SMBus controller on the Intel SCH
@@ -543,6 +543,7 @@
config I2C_OCORES
tristate "OpenCores I2C Controller"
+ depends on GENERIC_HARDIRQS
help
If you say yes to this option, support will be included for the
OpenCores I2C controller. For details see
@@ -777,7 +778,7 @@
config I2C_PARPORT
tristate "Parallel port adapter"
- depends on PARPORT
+ depends on PARPORT && GENERIC_HARDIRQS
select I2C_ALGOBIT
select I2C_SMBUS
help
@@ -802,6 +803,7 @@
config I2C_PARPORT_LIGHT
tristate "Parallel port adapter (light)"
+ depends on GENERIC_HARDIRQS
select I2C_ALGOBIT
select I2C_SMBUS
help
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 7cd74e2..9135606 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -158,14 +158,10 @@
#define GET_TIME(x) rdtscl(x)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "PCC"
-#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
-#define GET_TIME(x) do { x = get_cycles(); } while (0)
-#define DELTA(x, y) ((x) - (y))
-#define TIME_NAME "TSC"
+#define TIME_NAME "get_cycles"
#else
#define FAKE_TIME
static unsigned long analog_faketime = 0;
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 63feb75..9279a91 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -19,6 +19,12 @@
/* 10 parts were found on sflash on Netgear WNDR4500 */
#define BCM47XXPART_MAX_PARTS 12
+/*
+ * Amount of bytes we read when analyzing each block of flash memory.
+ * Set it big enough to allow detecting partition and reading important data.
+ */
+#define BCM47XXPART_BYTES_TO_READ 0x404
+
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
#define POT_MAGIC1 0x54544f50 /* POTT */
@@ -57,17 +63,15 @@
struct trx_header *trx;
int trx_part = -1;
int last_trx_part = -1;
- int max_bytes_to_read = 0x8004;
+ int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
if (blocksize <= 0x10000)
blocksize = 0x10000;
- if (blocksize == 0x20000)
- max_bytes_to_read = 0x18004;
/* Alloc */
parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
GFP_KERNEL);
- buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
+ buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
@@ -82,7 +86,7 @@
}
/* Read beginning of the block */
- if (mtd_read(master, offset, max_bytes_to_read,
+ if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
&bytes_read, (uint8_t *)buf) < 0) {
pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
offset);
@@ -96,20 +100,6 @@
continue;
}
- /* Standard NVRAM */
- if (buf[0x000 / 4] == NVRAM_HEADER ||
- buf[0x1000 / 4] == NVRAM_HEADER ||
- buf[0x8000 / 4] == NVRAM_HEADER ||
- (blocksize == 0x20000 && (
- buf[0x10000 / 4] == NVRAM_HEADER ||
- buf[0x11000 / 4] == NVRAM_HEADER ||
- buf[0x18000 / 4] == NVRAM_HEADER))) {
- bcm47xxpart_add_part(&parts[curr_part++], "nvram",
- offset, 0);
- offset = rounddown(offset, blocksize);
- continue;
- }
-
/*
* board_data starts with board_id which differs across boards,
* but we can use 'MPFR' (hopefully) magic at 0x100
@@ -178,6 +168,30 @@
continue;
}
}
+
+ /* Look for NVRAM at the end of the last block. */
+ for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
+ if (curr_part > BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ offset = master->size - possible_nvram_sizes[i];
+ if (mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while reading at offset 0x%X!\n",
+ offset);
+ continue;
+ }
+
+ /* Standard NVRAM */
+ if (buf[0] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ master->size - blocksize, 0);
+ break;
+ }
+ }
+
kfree(buf);
/*
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 4321415..42c6392 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1523,6 +1523,14 @@
oobreadlen -= toread;
}
}
+
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
@@ -1787,6 +1795,14 @@
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
readlen -= len;
if (!readlen)
break;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index e3aa274..9c61238 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -22,49 +22,51 @@
* 512 512 Byte page size
*/
struct nand_flash_dev nand_flash_ids[] = {
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
- {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
- {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
- {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
- {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
- {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
- {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
- {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
+ {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS},
+ {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS},
+ {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS},
+ {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS},
+ {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS},
+ {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS},
+ {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS},
+ {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS},
+ {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS},
+ {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
- {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
- {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
- {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+ {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS},
+ {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS},
+ {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16},
+ {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16},
#endif
- {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
- {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
- {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS},
+ {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS},
+ {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16},
+ {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16},
- {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
- {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
- {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS},
+ {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS},
+ {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16},
+ {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16},
- {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
- {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
- {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS},
+ {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16},
+ {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
- {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0},
- {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
- {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS},
+ {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS},
+ {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16},
+ {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16},
+ {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16},
+ {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
+ {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS},
/*
* These are the new chips with large page size. The pagesize and the
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 75933a6..efb7f10 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1277,21 +1277,80 @@
}
#ifdef CONFIG_PM
+
+static u32 wakeups[MAX_GPIO_BANKS];
+static u32 backups[MAX_GPIO_BANKS];
+
static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
{
struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
unsigned bank = at91_gpio->pioc_idx;
+ unsigned mask = 1 << d->hwirq;
if (unlikely(bank >= MAX_GPIO_BANKS))
return -EINVAL;
+ if (state)
+ wakeups[bank] |= mask;
+ else
+ wakeups[bank] &= ~mask;
+
irq_set_irq_wake(at91_gpio->pioc_virq, state);
return 0;
}
+
+void at91_pinctrl_gpio_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < gpio_banks; i++) {
+ void __iomem *pio;
+
+ if (!gpio_chips[i])
+ continue;
+
+ pio = gpio_chips[i]->regbase;
+
+ backups[i] = __raw_readl(pio + PIO_IMR);
+ __raw_writel(backups[i], pio + PIO_IDR);
+ __raw_writel(wakeups[i], pio + PIO_IER);
+
+ if (!wakeups[i]) {
+ clk_unprepare(gpio_chips[i]->clock);
+ clk_disable(gpio_chips[i]->clock);
+ } else {
+ printk(KERN_DEBUG "GPIO-%c may wake for %08x\n",
+ 'A'+i, wakeups[i]);
+ }
+ }
+}
+
+void at91_pinctrl_gpio_resume(void)
+{
+ int i;
+
+ for (i = 0; i < gpio_banks; i++) {
+ void __iomem *pio;
+
+ if (!gpio_chips[i])
+ continue;
+
+ pio = gpio_chips[i]->regbase;
+
+ if (!wakeups[i]) {
+ if (clk_prepare(gpio_chips[i]->clock) == 0)
+ clk_enable(gpio_chips[i]->clock);
+ }
+
+ __raw_writel(wakeups[i], pio + PIO_IDR);
+ __raw_writel(backups[i], pio + PIO_IER);
+ }
+}
+
#else
#define gpio_irq_set_wake NULL
-#endif
+#endif /* CONFIG_PM */
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9978ad4..5ac9c93 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -135,6 +135,11 @@
.release = scm_release,
};
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+ return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
static void scm_request_prepare(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
@@ -195,14 +200,18 @@
scm_release_cluster(scmrq);
blk_requeue_request(bdev->rq, scmrq->request);
+ atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
scm_ensure_queue_restart(bdev);
}
void scm_request_finish(struct scm_request *scmrq)
{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+
scm_release_cluster(scmrq);
blk_end_request_all(scmrq->request, scmrq->error);
+ atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
}
@@ -218,6 +227,10 @@
if (req->cmd_type != REQ_TYPE_FS)
continue;
+ if (!scm_permit_request(bdev, req)) {
+ scm_ensure_queue_restart(bdev);
+ return;
+ }
scmrq = scm_request_fetch();
if (!scmrq) {
SCM_LOG(5, "no request");
@@ -231,11 +244,13 @@
return;
}
if (scm_need_cluster_request(scmrq)) {
+ atomic_inc(&bdev->queued_reqs);
blk_start_request(req);
scm_initiate_cluster_request(scmrq);
return;
}
scm_request_prepare(scmrq);
+ atomic_inc(&bdev->queued_reqs);
blk_start_request(req);
ret = scm_start_aob(scmrq->aob);
@@ -244,7 +259,6 @@
scm_request_requeue(scmrq);
return;
}
- atomic_inc(&bdev->queued_reqs);
}
}
@@ -280,6 +294,38 @@
tasklet_hi_schedule(&bdev->tasklet);
}
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ unsigned long flags;
+
+ if (scmrq->error != -EIO)
+ goto restart;
+
+ /* For -EIO the response block is valid. */
+ switch (scmrq->aob->response.eqc) {
+ case EQC_WR_PROHIBIT:
+ spin_lock_irqsave(&bdev->lock, flags);
+ if (bdev->state != SCM_WR_PROHIBIT)
+ pr_info("%lu: Write access to the SCM increment is suspended\n",
+ (unsigned long) bdev->scmdev->address);
+ bdev->state = SCM_WR_PROHIBIT;
+ spin_unlock_irqrestore(&bdev->lock, flags);
+ goto requeue;
+ default:
+ break;
+ }
+
+restart:
+ if (!scm_start_aob(scmrq->aob))
+ return;
+
+requeue:
+ spin_lock_irqsave(&bdev->rq_lock, flags);
+ scm_request_requeue(scmrq);
+ spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
struct scm_request *scmrq;
@@ -293,11 +339,8 @@
spin_unlock_irqrestore(&bdev->lock, flags);
if (scmrq->error && scmrq->retries-- > 0) {
- if (scm_start_aob(scmrq->aob)) {
- spin_lock_irqsave(&bdev->rq_lock, flags);
- scm_request_requeue(scmrq);
- spin_unlock_irqrestore(&bdev->rq_lock, flags);
- }
+ scm_blk_handle_error(scmrq);
+
/* Request restarted or requeued, handle next. */
spin_lock_irqsave(&bdev->lock, flags);
continue;
@@ -310,7 +353,6 @@
}
scm_request_finish(scmrq);
- atomic_dec(&bdev->queued_reqs);
spin_lock_irqsave(&bdev->lock, flags);
}
spin_unlock_irqrestore(&bdev->lock, flags);
@@ -332,6 +374,7 @@
}
bdev->scmdev = scmdev;
+ bdev->state = SCM_OPER;
spin_lock_init(&bdev->rq_lock);
spin_lock_init(&bdev->lock);
INIT_LIST_HEAD(&bdev->finished_requests);
@@ -396,6 +439,18 @@
put_disk(bdev->gendisk);
}
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdev->lock, flags);
+ if (bdev->state == SCM_WR_PROHIBIT)
+ pr_info("%lu: Write access to the SCM increment is restored\n",
+ (unsigned long) bdev->scmdev->address);
+ bdev->state = SCM_OPER;
+ spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
static int __init scm_blk_init(void)
{
int ret = -EINVAL;
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 3c1ccf4..8b387b3 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -21,6 +21,7 @@
spinlock_t rq_lock; /* guard the request queue */
spinlock_t lock; /* guard the rest of the blockdev */
atomic_t queued_reqs;
+ enum {SCM_OPER, SCM_WR_PROHIBIT} state;
struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
struct list_head cluster_list;
@@ -48,6 +49,7 @@
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);
void scm_request_finish(struct scm_request *);
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
index 9fa0a90..5f6180d 100644
--- a/drivers/s390/block/scm_drv.c
+++ b/drivers/s390/block/scm_drv.c
@@ -13,12 +13,23 @@
#include <asm/eadm.h>
#include "scm_blk.h"
-static void notify(struct scm_device *scmdev)
+static void scm_notify(struct scm_device *scmdev, enum scm_event event)
{
- pr_info("%lu: The capabilities of the SCM increment changed\n",
- (unsigned long) scmdev->address);
- SCM_LOG(2, "State changed");
- SCM_LOG_STATE(2, scmdev);
+ struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+ switch (event) {
+ case SCM_CHANGE:
+ pr_info("%lu: The capabilities of the SCM increment changed\n",
+ (unsigned long) scmdev->address);
+ SCM_LOG(2, "State changed");
+ SCM_LOG_STATE(2, scmdev);
+ break;
+ case SCM_AVAIL:
+ SCM_LOG(2, "Increment available");
+ SCM_LOG_STATE(2, scmdev);
+ scm_blk_set_available(bdev);
+ break;
+ }
}
static int scm_probe(struct scm_device *scmdev)
@@ -64,7 +75,7 @@
.name = "scm_block",
.owner = THIS_MODULE,
},
- .notify = notify,
+ .notify = scm_notify,
.probe = scm_probe,
.remove = scm_remove,
.handler = scm_blk_irq,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 30a2255..cd79838 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -627,6 +627,8 @@
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
+ if (OLDMEM_BASE) /* No standby memory in kdump mode */
+ return 0;
if (!early_read_info_sccb_valid)
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 31ceef1..e16c553 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -433,6 +433,20 @@
" failed (rc=%d).\n", ret);
}
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm available information\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_process_availability_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: process availability information"
+ " failed (rc=%d).\n", ret);
+}
+
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
@@ -468,6 +482,9 @@
case 12: /* scm change notification */
chsc_process_sei_scm_change(sei_area);
break;
+ case 14: /* scm available notification */
+ chsc_process_sei_scm_avail(sei_area);
+ break;
default: /* other stuff */
CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 227e05f..349d5fc 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -156,8 +156,10 @@
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
+int scm_process_availability_information(void);
#else /* CONFIG_SCM_BUS */
static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
#endif /* CONFIG_SCM_BUS */
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index bcf20f3..46ec256 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -211,7 +211,7 @@
goto out;
scmdrv = to_scm_drv(scmdev->dev.driver);
if (changed && scmdrv->notify)
- scmdrv->notify(scmdev);
+ scmdrv->notify(scmdev, SCM_CHANGE);
out:
device_unlock(&scmdev->dev);
if (changed)
@@ -297,6 +297,22 @@
return ret;
}
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ if (dev->driver && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_AVAIL);
+
+ return 0;
+}
+
+int scm_process_availability_information(void)
+{
+ return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
static int __init scm_init(void)
{
int ret;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 12cf5f3..025428e 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -422,17 +422,22 @@
= var->bits_per_pixel;
break;
case 16:
+ /* Older SOCs use IBGR:555 rather than BGR:565. */
+ if (sinfo->have_intensity_bit)
+ var->green.length = 5;
+ else
+ var->green.length = 6;
+
if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
- /* RGB:565 mode */
- var->red.offset = 11;
+ /* RGB:5X5 mode */
+ var->red.offset = var->green.length + 5;
var->blue.offset = 0;
} else {
- /* BGR:565 mode */
+ /* BGR:5X5 mode */
var->red.offset = 0;
- var->blue.offset = 11;
+ var->blue.offset = var->green.length + 5;
}
var->green.offset = 5;
- var->green.length = 6;
var->red.length = var->blue.length = 5;
break;
case 32:
@@ -679,8 +684,7 @@
case FB_VISUAL_PSEUDOCOLOR:
if (regno < 256) {
- if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
- || cpu_is_at91sam9rl()) {
+ if (sinfo->have_intensity_bit) {
/* old style I+BGR:555 */
val = ((red >> 11) & 0x001f);
val |= ((green >> 6) & 0x03e0);
@@ -870,6 +874,10 @@
}
sinfo->info = info;
sinfo->pdev = pdev;
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9263() ||
+ cpu_is_at91sam9rl()) {
+ sinfo->have_intensity_bit = true;
+ }
strcpy(info->fix.id, sinfo->pdev->name);
info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
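
The 16bpp handling now derives the pixel layout from the green length: SoCs with the intensity bit use I+BGR:555 (5-bit green), the rest use 565 (6-bit green), and the red/blue offsets follow from stacking a 5-bit field, the green field, then another 5-bit field. A small illustrative sketch of that derivation (the struct and function names are made up):

#include <stdbool.h>
#include <stdio.h>

struct rgb_layout {
	int red_offset, green_offset, blue_offset;
	int red_length, green_length, blue_length;
};

/* Derive the 16bpp field layout the way the driver now does. */
static struct rgb_layout layout_16bpp(bool have_intensity_bit, bool wiring_rgb)
{
	struct rgb_layout l = {
		.green_offset = 5,
		.green_length = have_intensity_bit ? 5 : 6,
		.red_length = 5,
		.blue_length = 5,
	};

	if (wiring_rgb) {			/* RGB:5X5 */
		l.red_offset = l.green_length + 5;
		l.blue_offset = 0;
	} else {				/* BGR:5X5 */
		l.red_offset = 0;
		l.blue_offset = l.green_length + 5;
	}
	return l;
}

int main(void)
{
	/* Older SoC (intensity bit present), BGR wiring: B@10 G@5 R@0. */
	struct rgb_layout l = layout_16bpp(true, false);

	printf("red@%d green@%d(len %d) blue@%d\n",
	       l.red_offset, l.green_offset, l.green_length, l.blue_offset);
	return 0;
}
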
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7ccb3c5..ef52d9c 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -187,6 +187,13 @@
* This happens with the Renesas AG-AND chips, possibly others.
*/
#define BBT_AUTO_REFRESH 0x00000080
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY 0x00000100
+
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 28447f1..8deb226 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -30,7 +30,6 @@
*/
#define ATMEL_LCDC_WIRING_BGR 0
#define ATMEL_LCDC_WIRING_RGB 1
-#define ATMEL_LCDC_WIRING_RGB555 2
/* LCD Controller info data structure, stored in device platform_data */
@@ -62,6 +61,7 @@
void (*atmel_lcdfb_power_control)(int on);
struct fb_monspecs *default_monspecs;
u32 pseudo_palette[16];
+ bool have_intensity_bit;
};
#define ATMEL_LCDC_DMABADDR1 0x00
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 55fac5b..b48cd59 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3447,28 +3447,34 @@
spin_unlock_irq(&pool->lock);
mutex_unlock(&pool->assoc_mutex);
- }
- /*
- * Call schedule() so that we cross rq->lock and thus can guarantee
- * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
- * as scheduler callbacks may be invoked from other cpus.
- */
- schedule();
+ /*
+ * Call schedule() so that we cross rq->lock and thus can
+ * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+ * This is necessary as scheduler callbacks may be invoked
+ * from other cpus.
+ */
+ schedule();
- /*
- * Sched callbacks are disabled now. Zap nr_running. After this,
- * nr_running stays zero and need_more_worker() and keep_working()
- * are always true as long as the worklist is not empty. Pools on
- * @cpu now behave as unbound (in terms of concurrency management)
- * pools which are served by workers tied to the CPU.
- *
- * On return from this function, the current worker would trigger
- * unbound chain execution of pending work items if other workers
- * didn't already.
- */
- for_each_std_worker_pool(pool, cpu)
+ /*
+ * Sched callbacks are disabled now. Zap nr_running.
+ * After this, nr_running stays zero and need_more_worker()
+ * and keep_working() are always true as long as the
+ * worklist is not empty. This pool now behaves as an
+ * unbound (in terms of concurrency management) pool which
+ * are served by workers tied to the pool.
+ */
atomic_set(&pool->nr_running, 0);
+
+ /*
+ * With concurrency management just turned off, a busy
+ * worker blocking could lead to lengthy stalls. Kick off
+ * unbound chain execution of currently pending work items.
+ */
+ spin_lock_irq(&pool->lock);
+ wake_up_worker(pool);
+ spin_unlock_irq(&pool->lock);
+ }
}
/*
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 48665ec..8ab2951 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -310,7 +310,7 @@
if (old_ctx) {
new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!new_ctx)
return -ENOMEM;