Merge master.kernel.org:/home/rmk/linux-2.6-arm
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
index 5cba9c1..7971d68 100644
--- a/arch/frv/kernel/semaphore.c
+++ b/arch/frv/kernel/semaphore.c
@@ -20,7 +20,7 @@
struct task_struct *task;
};
-#if SEM_DEBUG
+#if SEMAPHORE_DEBUG
void semtrace(struct semaphore *sem, const char *str)
{
if (sem->debug)
diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c
index af981bd..24622d8 100644
--- a/arch/frv/mb93090-mb00/pci-irq.c
+++ b/arch/frv/mb93090-mb00/pci-irq.c
@@ -60,7 +60,7 @@
}
}
-void __init pcibios_penalize_isa_irq(int irq, int active)
+void __init pcibios_penalize_isa_irq(int irq)
{
}
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 7943315..765088ea 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -108,7 +108,7 @@
memset((void *) empty_zero_page, 0, PAGE_SIZE);
-#if CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
if (num_physpages - num_mappedpages) {
pgd_t *pge;
pud_t *pue;
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 2c67dfe..f76dd03 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -85,7 +85,7 @@
struct page *page = virt_to_page(pgd);
page->index = (unsigned long) pgd_list;
if (pgd_list)
- pgd_list->private = (unsigned long) &page->index;
+ set_page_private(pgd_list, (unsigned long) &page->index);
pgd_list = page;
set_page_private(page, (unsigned long)&pgd_list);
}
@@ -94,10 +94,10 @@
{
struct page *next, **pprev, *page = virt_to_page(pgd);
next = (struct page *) page->index;
- pprev = (struct page **)page_private(page);
+ pprev = (struct page **) page_private(page);
*pprev = next;
if (next)
- next->private = (unsigned long) pprev;
+ set_page_private(next, (unsigned long) pprev);
}
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
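These two pgalloc hunks finish converting frv's pgd list to the page_private() accessors; the raw pgd_list->private and next->private assignments were direct pokes at page->private. For reference, a sketch of the accessor pair being adopted (based on the include/linux/mm.h of this era; details may differ):

    /* Accessor pair these hunks adopt (sketch; the canonical
     * definitions live in include/linux/mm.h). */
    #define page_private(page)              ((page)->private)
    #define set_page_private(page, v)       ((page)->private = (v))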
diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c
index 6716ffe..f80321a 100644
--- a/arch/m32r/kernel/io_mappi3.c
+++ b/arch/m32r/kernel/io_mappi3.c
@@ -36,12 +36,13 @@
return (void *)(port + NONCACHE_OFFSET);
}
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+#if defined(CONFIG_IDE)
static inline void *__port2addr_ata(unsigned long port)
{
static int dummy_reg;
switch (port) {
+ /* IDE0 CF */
case 0x1f0: return (void *)0xb4002000;
case 0x1f1: return (void *)0xb4012800;
case 0x1f2: return (void *)0xb4012002;
@@ -51,6 +52,17 @@
case 0x1f6: return (void *)0xb4012006;
case 0x1f7: return (void *)0xb4012806;
case 0x3f6: return (void *)0xb401200e;
+ /* IDE1 IDE */
+ case 0x170: return (void *)0xb4810000; /* Data 16bit */
+ case 0x171: return (void *)0xb4810002; /* Features / Error */
+ case 0x172: return (void *)0xb4810004; /* Sector count */
+ case 0x173: return (void *)0xb4810006; /* Sector number */
+ case 0x174: return (void *)0xb4810008; /* Cylinder low */
+ case 0x175: return (void *)0xb481000a; /* Cylinder high */
+ case 0x176: return (void *)0xb481000c; /* Device head */
+ case 0x177: return (void *)0xb481000e; /* Command */
+ case 0x376: return (void *)0xb480800c; /* Device control / Alt status */
+
default: return (void *)&dummy_reg;
}
}
@@ -108,8 +120,9 @@
{
if (port >= LAN_IOSTART && port < LAN_IOEND)
return _ne_inb(PORT2ADDR_NE(port));
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ else if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
return *(volatile unsigned char *)__port2addr_ata(port);
}
#endif
@@ -127,8 +140,9 @@
{
if (port >= LAN_IOSTART && port < LAN_IOEND)
return _ne_inw(PORT2ADDR_NE(port));
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ else if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
return *(volatile unsigned short *)__port2addr_ata(port);
}
#endif
@@ -185,8 +199,9 @@
if (port >= LAN_IOSTART && port < LAN_IOEND)
_ne_outb(b, PORT2ADDR_NE(port));
else
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
*(volatile unsigned char *)__port2addr_ata(port) = b;
} else
#endif
@@ -203,8 +218,9 @@
if (port >= LAN_IOSTART && port < LAN_IOEND)
_ne_outw(w, PORT2ADDR_NE(port));
else
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
*(volatile unsigned short *)__port2addr_ata(port) = w;
} else
#endif
@@ -253,8 +269,9 @@
{
if (port >= LAN_IOSTART && port < LAN_IOEND)
_ne_insb(PORT2ADDR_NE(port), addr, count);
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ else if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
unsigned char *buf = addr;
unsigned char *portp = __port2addr_ata(port);
while (count--)
@@ -289,8 +306,9 @@
pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short),
count, 1);
#endif
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ } else if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
portp = __port2addr_ata(port);
while (count--)
*buf++ = *(volatile unsigned short *)portp;
@@ -321,8 +339,9 @@
portp = PORT2ADDR_NE(port);
while (count--)
_ne_outb(*buf++, portp);
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ } else if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
portp = __port2addr_ata(port);
while (count--)
*(volatile unsigned char *)portp = *buf++;
@@ -348,8 +367,9 @@
portp = PORT2ADDR_NE(port);
while (count--)
*(volatile unsigned short *)portp = *buf++;
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
- } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+ } else if (((port >= 0x170 && port <= 0x177) || port == 0x376) ||
+ ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6)) {
portp = __port2addr_ata(port);
while (count--)
*(volatile unsigned short *)portp = *buf++;
diff --git a/arch/m32r/kernel/setup_mappi3.c b/arch/m32r/kernel/setup_mappi3.c
index 9c79341..f6ecdf7 100644
--- a/arch/m32r/kernel/setup_mappi3.c
+++ b/arch/m32r/kernel/setup_mappi3.c
@@ -151,7 +151,7 @@
disable_mappi3_irq(M32R_IRQ_INT1);
#endif /* CONFIG_USB */
- /* ICUCR40: CFC IREQ */
+ /* CFC IREQ */
irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
irq_desc[PLD_IRQ_CFIREQ].handler = &mappi3_irq_type;
irq_desc[PLD_IRQ_CFIREQ].action = 0;
@@ -160,7 +160,7 @@
disable_mappi3_irq(PLD_IRQ_CFIREQ);
#if defined(CONFIG_M32R_CFC)
- /* ICUCR41: CFC Insert */
+ /* ICUCR41: CFC Insert & eject */
irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
irq_desc[PLD_IRQ_CFC_INSERT].handler = &mappi3_irq_type;
irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
@@ -168,14 +168,16 @@
icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
- /* ICUCR42: CFC Eject */
- irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].handler = &mappi3_irq_type;
- irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
- irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
- icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
- disable_mappi3_irq(PLD_IRQ_CFC_EJECT);
#endif /* CONFIG_M32R_CFC */
+
+ /* IDE IREQ */
+ irq_desc[PLD_IRQ_IDEIREQ].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_IDEIREQ].handler = &mappi3_irq_type;
+ irq_desc[PLD_IRQ_IDEIREQ].action = 0;
+ irq_desc[PLD_IRQ_IDEIREQ].depth = 1; /* disable nested irq */
+ icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ disable_mappi3_irq(PLD_IRQ_IDEIREQ);
+
}
#if defined(CONFIG_SMC91X)
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index e0500e1..fe55b28 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -41,7 +41,8 @@
return -EFAULT;
local_irq_save(flags);
oldval = *addr;
- *addr = 1;
+ if (!oldval)
+ *addr = 1;
local_irq_restore(flags);
return oldval;
}
@@ -59,7 +60,8 @@
_raw_spin_lock(&tas_lock);
oldval = *addr;
- *addr = 1;
+ if (!oldval)
+ *addr = 1;
_raw_spin_unlock(&tas_lock);
return oldval;
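Both variants of sys_tas() above now store 1 only when the old value was 0. Besides avoiding a needless dirtying of the word, this closes a window in the SMP variant where the unconditional store could overwrite a release (a plain write of 0) performed by another CPU between the load and the store, leaving the word locked with no owner. A sketch of the intended semantics from the caller's side (hypothetical helpers, not part of this patch):

    static volatile int lock_word;

    static void acquire(void)
    {
        /* A tas-style call returns the previous value; 0 means we own it. */
        while (tas(&lock_word))
            ;
    }

    static void release(void)
    {
        lock_word = 0;  /* plain store; tas() must not stomp on this */
    }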
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 98f67c7..a13eb57 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -61,15 +61,17 @@
LDFLAGS_vmlinux := -Bstatic
# The -Iarch/$(ARCH)/include is temporary while we are merging
-CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
-AFLAGS += -Iarch/$(ARCH)
-CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
+CPPFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -Iarch/$(ARCH)/include
+AFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH)
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
-CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
-CFLAGS += $(CFLAGS-y)
+CFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -ffixed-r2 -mmultiple
+CPPFLAGS += $(CPPFLAGS-y)
+AFLAGS += $(AFLAGS-y)
+CFLAGS += -msoft-float -pipe $(CFLAGS-y)
CPP = $(CC) -E $(CFLAGS)
# Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE += -Iarch/$(ARCH)/include
+LINUXINCLUDE-$(CONFIG_PPC32) := -Iarch/$(ARCH)/include
+LINUXINCLUDE += $(LINUXINCLUDE-y)
CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
@@ -173,11 +175,13 @@
archprepare: checkbin
+ifeq ($(CONFIG_PPC32),y)
# Temporary hack until we have migrated to asm-powerpc
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm: FORCE
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
+endif
# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
# to stdout and these checks are run even on install targets.
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b44b36e..f0c47da 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -145,8 +145,7 @@
struct page *pg = virt_to_page(vdso32_kbase +
i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
- follow_page(vma->vm_mm, vma->vm_start +
- i*PAGE_SIZE, 0)
+ follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
@@ -157,8 +156,7 @@
struct page *pg = virt_to_page(vdso64_kbase +
i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
- follow_page(vma->vm_mm, vma->vm_start +
- i*PAGE_SIZE, 0)
+ follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f867bba..6bc9dba 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -295,7 +295,7 @@
if (addr < 0x100000000UL)
err = open_low_hpage_areas(current->mm,
LOW_ESID_MASK(addr, len));
- if ((addr + len) >= 0x100000000UL)
+ if ((addr + len) > 0x100000000UL)
err = open_high_hpage_areas(current->mm,
HTLB_AREA_MASK(addr, len));
if (err) {
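The >= to > change above matters exactly at the 4GB boundary: a mapping whose end is 0x100000000UL lies entirely below 4GB, so no high hugepage area should be opened for it. A worked instance (assumed values, illustration only):

    unsigned long addr = 0xf0000000UL;  /* starts at 3.75GB */
    unsigned long len  = 0x10000000UL;  /* 256MB long */
    /* addr + len == 0x100000000UL: the mapping ends exactly at 4GB.
     * Old test: (addr + len) >= 0x100000000UL -> true, wrongly opens
     *           a high area for a purely low-range mapping.
     * New test: (addr + len) >  0x100000000UL -> false, correct. */

The include/asm-powerpc/page_64.h hunks later in this series clamp LOW_ESID_MASK() and the range tests to the same boundary.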
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bf081b3..2b54eeb 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -3,7 +3,7 @@
*
* Rewrite, cleanup:
*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Dynamic DMA mapping support, iSeries-specific parts.
*
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 97ba521..c78f2b2 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -5,7 +5,7 @@
*
* Rewrite, cleanup:
*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
*
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h
index ea8f0d9..33ed9ed 100644
--- a/arch/powerpc/sysdev/dart.h
+++ b/arch/powerpc/sysdev/dart.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index f32baf7..5c1a26a 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -1,11 +1,11 @@
/*
* arch/powerpc/sysdev/u3_iommu.c
*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Based on pSeries_iommu.c:
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
*
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 0410bae..2cb0728 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -32,9 +32,7 @@
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
- pte_t oldpage = *pte;
- pte_clear(mm, address, pte);
- set_pte(pte, mk_pte_io(offset, prot, space));
+ set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
address += PAGE_SIZE;
offset += PAGE_SIZE;
pte++;
@@ -63,7 +61,7 @@
}
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long pfn, unsigned long size, pgprot_t prot)
+ unsigned long pfn, unsigned long size, pgprot_t prot)
{
int error = 0;
pgd_t * dir;
@@ -74,7 +72,9 @@
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
/* See comment in mm/memory.c remap_pfn_range */
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+ vma->vm_pgoff = (offset >> PAGE_SHIFT) |
+ ((unsigned long)space << 28UL);
prot = __pgprot(pg_iobits);
offset -= from;
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 8fd4cb1..d9396c1 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -15,6 +15,15 @@
#include <asm/page.h>
#include <asm/tlbflush.h>
+static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
+{
+ pte_t pte;
+ pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
+ ~(unsigned long)_PAGE_CACHE);
+ pte_val(pte) |= (((unsigned long)space) << 32);
+ return pte;
+}
+
/* Remap IO memory, the same way as remap_pfn_range(), but use
* the obio memory space.
*
@@ -126,9 +135,13 @@
struct mm_struct *mm = vma->vm_mm;
int space = GET_IOSPACE(pfn);
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+ unsigned long phys_base;
+
+ phys_base = offset | (((unsigned long) space) << 32UL);
/* See comment in mm/memory.c remap_pfn_range */
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+ vma->vm_pgoff = phys_base >> PAGE_SHIFT;
prot = __pgprot(pg_iobits);
offset -= from;
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index b276ae8..b48a595 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -104,6 +104,10 @@
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+ if (ret)
+ return ret;
+
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
@@ -116,8 +120,12 @@
if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
dev->driver->dma_ready(dev);
- if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT))
- return dev->driver->dma_quiescent(dev);
+ if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
+ if (dev->driver->dma_quiescent(dev)) {
+ DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context);
+ return DRM_ERR(EBUSY);
+ }
+ }
/* dev->driver->kernel_context_switch isn't used by any of the x86
* drivers but is used by the Sparc driver.
@@ -128,9 +136,7 @@
dev->driver->kernel_context_switch(dev, dev->last_context,
lock.context);
}
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-
- return ret;
+ return 0;
}
/**
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 29c3b63..91dd669 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -591,7 +591,7 @@
if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
goto out_up;
- if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
+ if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
break;
count = vma->vm_end - addr;
if (count > size)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1c0f62d..815902c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1113,21 +1113,13 @@
{
int retval = -EINVAL;
- /*
- * If we are already in context of hotplug thread, we dont need to
- * acquire the hotplug lock. Otherwise acquire cpucontrol to prevent
- * hotplug from removing this cpu that we are working on.
- */
- if (!current_in_cpu_hotplug())
- lock_cpu_hotplug();
-
+ lock_cpu_hotplug();
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
- if (!current_in_cpu_hotplug())
- unlock_cpu_hotplug();
+ unlock_cpu_hotplug();
return retval;
}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 41d6b40..d393b50 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -355,9 +355,9 @@
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
kfree(reg_req);
error3:
- kfree(mad_agent_priv);
-error2:
ib_dereg_mr(mad_agent_priv->agent.mr);
+error2:
+ kfree(mad_agent_priv);
error1:
return ret;
}
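The reordered labels above fix a use-after-free: the old unwind ran kfree(mad_agent_priv) at error3 and then dereferenced the freed structure through agent.mr at error2. The restored rule, sketched with hypothetical names:

    err_mr:
        ib_dereg_mr(priv->agent.mr);    /* still reads priv */
    err_priv:
        kfree(priv);                    /* free only after the last use */
    err_out:
        return ret;

The drivers/message/i2o/pci.c hunk later in this series is the same class of fix: put_device(c->device.parent) now runs before i2o_iop_free(c), so c is no longer read after it has been freed.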
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 78c7418..cd12fca 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1028,7 +1028,6 @@
mddev->size = le64_to_cpu(sb->size)/2;
mddev->events = le64_to_cpu(sb->events);
mddev->bitmap_offset = 0;
- mddev->default_bitmap_offset = 0;
mddev->default_bitmap_offset = 1024;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
@@ -2932,6 +2931,9 @@
mddev->sb_dirty = 1;
+ mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_offset = 0;
+
/*
* Generate a 128 bit UUID
*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2da9d3b..3066c58 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -953,9 +953,6 @@
int mirror = 0;
mirror_info_t *p;
- if (rdev->saved_raid_disk >= 0 &&
- conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
- mirror = rdev->saved_raid_disk;
for (mirror=0; mirror < mddev->raid_disks; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
@@ -972,7 +969,10 @@
p->head_position = 0;
rdev->raid_disk = mirror;
found = 1;
- if (rdev->saved_raid_disk != mirror)
+ /* As all devices are equivalent, we don't need a full recovery
+ * if this device was recently a member of the array.
+ */
+ if (rdev->saved_raid_disk < 0)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
break;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 867f06a..713dc9c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -552,7 +552,11 @@
!test_bit(In_sync, &rdev->flags))
continue;
- if (!atomic_read(&rdev->nr_pending)) {
+ /* This optimisation is debatable, and completely destroys
+ * sequential read speed for 'far copies' arrays. So only
+ * keep it for 'near' arrays, and review those later.
+ */
+ if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
disk = ndisk;
slot = nslot;
break;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e2a4028..36d5f8a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1704,7 +1704,9 @@
if (conf->seq_flush - conf->seq_write > 0) {
int seq = conf->seq_flush;
+ spin_unlock_irq(&conf->device_lock);
bitmap_unplug(mddev->bitmap);
+ spin_lock_irq(&conf->device_lock);
conf->seq_write = seq;
activate_bit_delay(conf);
}
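bitmap_unplug() can submit and wait on bitmap I/O, so calling it under conf->device_lock (a spinlock, held here with interrupts disabled) means sleeping in atomic context; seq is sampled first so the decision survives the lock being dropped. The general shape of the fix, with hypothetical names:

    spin_lock_irq(&lock);
    seq = snapshot_state();     /* capture what the work needs */
    spin_unlock_irq(&lock);
    do_blocking_work(seq);      /* may sleep / wait on I/O */
    spin_lock_irq(&lock);
    commit_state(seq);          /* finish back under the lock */
    spin_unlock_irq(&lock);

raid6main.c receives the identical fix in its hunks below.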
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index eae5a35..0000d16 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1702,6 +1702,8 @@
int data_disks = raid_disks - 2;
sector_t max_sector = mddev->size << 1;
int sync_blocks;
+ int still_degraded = 0;
+ int i;
if (sector_nr >= max_sector) {
/* just being told to finish up .. nothing much to do */
@@ -1710,7 +1712,7 @@
if (mddev->curr_resync < max_sector) /* aborted */
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
- else /* compelted sync */
+ else /* completed sync */
conf->fullsync = 0;
bitmap_close_sync(mddev->bitmap);
@@ -1748,7 +1750,16 @@
*/
schedule_timeout_uninterruptible(1);
}
- bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
+ /* Need to check if the array will still be degraded after
+ * recovery/resync. We don't need to check the 'failed' flag,
+ * as recovery aborts once that gets set.
+ */
+ for (i=0; i<mddev->raid_disks; i++)
+ if (conf->disks[i].rdev == NULL)
+ still_degraded = 1;
+
+ bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+
spin_lock(&sh->lock);
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
@@ -1784,7 +1795,9 @@
if (conf->seq_flush - conf->seq_write > 0) {
int seq = conf->seq_flush;
+ spin_unlock_irq(&conf->device_lock);
bitmap_unplug(mddev->bitmap);
+ spin_lock_irq(&conf->device_lock);
conf->seq_write = seq;
activate_bit_delay(conf);
}
@@ -2145,9 +2158,15 @@
/* no point adding a device */
return 0;
/*
- * find the disk ...
+ * find the disk ... but prefer rdev->saved_raid_disk
+ * if possible.
*/
- for (disk=0; disk < mddev->raid_disks; disk++)
+ if (rdev->saved_raid_disk >= 0 &&
+ conf->disks[rdev->saved_raid_disk].rdev == NULL)
+ disk = rdev->saved_raid_disk;
+ else
+ disk = 0;
+ for ( ; disk < mddev->raid_disks; disk++)
if ((p=conf->disks + disk)->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 1a3b3c7..ecb9a31 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -26,7 +26,7 @@
module will be called bttv.
config VIDEO_BT848_DVB
- tristate "DVB/ATSC Support for bt878 based TV cards"
+ bool "DVB/ATSC Support for bt878 based TV cards"
depends on VIDEO_BT848 && DVB_CORE
select DVB_BT8XX
---help---
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 41818b6..85ba410 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -46,8 +46,8 @@
If you are unsure, choose Y.
config VIDEO_CX88_DVB_MT352
- tristate "Zarlink MT352 DVB-T Support"
- default m
+ bool "Zarlink MT352 DVB-T Support"
+ default y
depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
select DVB_MT352
---help---
@@ -55,8 +55,8 @@
Conexant 2388x chip and the MT352 demodulator.
config VIDEO_CX88_DVB_OR51132
- tristate "OR51132 ATSC Support"
- default m
+ bool "OR51132 ATSC Support"
+ default y
depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
select DVB_OR51132
---help---
@@ -64,8 +64,8 @@
Conexant 2388x chip and the OR51132 demodulator.
config VIDEO_CX88_DVB_CX22702
- tristate "Conexant CX22702 DVB-T Support"
- default m
+ bool "Conexant CX22702 DVB-T Support"
+ default y
depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
select DVB_CX22702
---help---
@@ -73,8 +73,8 @@
Conexant 2388x chip and the CX22702 demodulator.
config VIDEO_CX88_DVB_LGDT330X
- tristate "LG Electronics DT3302/DT3303 ATSC Support"
- default m
+ bool "LG Electronics DT3302/DT3303 ATSC Support"
+ default y
depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
select DVB_LGDT330X
---help---
@@ -82,8 +82,8 @@
Conexant 2388x chip and the LGDT3302/LGDT3303 demodulator.
config VIDEO_CX88_DVB_NXT200X
- tristate "NXT2002/NXT2004 ATSC Support"
- default m
+ bool "NXT2002/NXT2004 ATSC Support"
+ default y
depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
select DVB_NXT200X
---help---
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 0df40b7..54401b0 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -9,21 +9,12 @@
EXTRA_CFLAGS += -I$(src)/..
EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/frontends
-ifneq ($(CONFIG_VIDEO_BUF_DVB),n)
- EXTRA_CFLAGS += -DHAVE_VIDEO_BUF_DVB=1
-endif
-ifneq ($(CONFIG_DVB_CX22702),n)
- EXTRA_CFLAGS += -DHAVE_CX22702=1
-endif
-ifneq ($(CONFIG_DVB_OR51132),n)
- EXTRA_CFLAGS += -DHAVE_OR51132=1
-endif
-ifneq ($(CONFIG_DVB_LGDT330X),n)
- EXTRA_CFLAGS += -DHAVE_LGDT330X=1
-endif
-ifneq ($(CONFIG_DVB_MT352),n)
- EXTRA_CFLAGS += -DHAVE_MT352=1
-endif
-ifneq ($(CONFIG_DVB_NXT200X),n)
- EXTRA_CFLAGS += -DHAVE_NXT200X=1
-endif
+
+extra-cflags-$(CONFIG_VIDEO_BUF_DVB) += -DHAVE_VIDEO_BUF_DVB=1
+extra-cflags-$(CONFIG_DVB_CX22702) += -DHAVE_CX22702=1
+extra-cflags-$(CONFIG_DVB_OR51132) += -DHAVE_OR51132=1
+extra-cflags-$(CONFIG_DVB_LGDT330X) += -DHAVE_LGDT330X=1
+extra-cflags-$(CONFIG_DVB_MT352) += -DHAVE_MT352=1
+extra-cflags-$(CONFIG_DVB_NXT200X) += -DHAVE_NXT200X=1
+
+EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 7bdeabe..c512c44 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -42,8 +42,8 @@
If you are unsure, choose Y.
config VIDEO_SAA7134_DVB_MT352
- tristate "Zarlink MT352 DVB-T Support"
- default m
+ bool "Zarlink MT352 DVB-T Support"
+ default y
depends on VIDEO_SAA7134_DVB && !VIDEO_SAA7134_DVB_ALL_FRONTENDS
select DVB_MT352
---help---
@@ -51,8 +51,8 @@
Philips saa7134 chip and the MT352 demodulator.
config VIDEO_SAA7134_DVB_TDA1004X
- tristate "Phillips TDA10045H/TDA10046H DVB-T Support"
- default m
+ bool "Phillips TDA10045H/TDA10046H DVB-T Support"
+ default y
depends on VIDEO_SAA7134_DVB && !VIDEO_SAA7134_DVB_ALL_FRONTENDS
select DVB_TDA1004X
---help---
@@ -60,8 +60,8 @@
Philips saa7134 chip and the TDA10045H/TDA10046H demodulator.
config VIDEO_SAA7134_DVB_NXT200X
- tristate "NXT2002/NXT2004 ATSC Support"
- default m
+ bool "NXT2002/NXT2004 ATSC Support"
+ default y
depends on VIDEO_SAA7134_DVB && !VIDEO_SAA7134_DVB_ALL_FRONTENDS
select DVB_NXT200X
---help---
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 4226b61..134f83a 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -11,15 +11,10 @@
EXTRA_CFLAGS += -I$(src)/..
EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/frontends
-ifneq ($(CONFIG_VIDEO_BUF_DVB),n)
- EXTRA_CFLAGS += -DHAVE_VIDEO_BUF_DVB=1
-endif
-ifneq ($(CONFIG_DVB_MT352),n)
- EXTRA_CFLAGS += -DHAVE_MT352=1
-endif
-ifneq ($(CONFIG_DVB_TDA1004X),n)
- EXTRA_CFLAGS += -DHAVE_TDA1004X=1
-endif
-ifneq ($(CONFIG_DVB_NXT200X),n)
- EXTRA_CFLAGS += -DHAVE_NXT200X=1
-endif
+
+extra-cflags-$(CONFIG_VIDEO_BUF_DVB) += -DHAVE_VIDEO_BUF_DVB=1
+extra-cflags-$(CONFIG_DVB_MT352) += -DHAVE_MT352=1
+extra-cflags-$(CONFIG_DVB_TDA1004X) += -DHAVE_TDA1004X=1
+extra-cflags-$(CONFIG_DVB_NXT200X) += -DHAVE_NXT200X=1
+
+EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 66c03e8..81ef306 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -421,8 +421,8 @@
i2o_pci_free(c);
free_controller:
- i2o_iop_free(c);
put_device(c->device.parent);
+ i2o_iop_free(c);
disable:
pci_disable_device(pdev);
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 2c22b4b..078579a 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -355,9 +355,10 @@
#ifndef CONFIG_PLAT_USRV
/* insert interrupt */
request_irq(irq, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt);
+#ifndef CONFIG_PLAT_MAPPI3
/* eject interrupt */
request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt);
-
+#endif
debug(3, "m32r_cfc: enable CFMSK, RDYSEL\n");
pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01);
#endif /* CONFIG_PLAT_USRV */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index c28e3ae..418fc7b 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -816,7 +816,7 @@
static void adpt_i2o_sys_shutdown(void)
{
adpt_hba *pHba, *pNext;
- struct adpt_i2o_post_wait_data *p1, *p2;
+ struct adpt_i2o_post_wait_data *p1, *old;
printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
@@ -830,13 +830,14 @@
}
/* Remove any timedout entries from the wait queue. */
- p2 = NULL;
// spin_lock_irqsave(&adpt_post_wait_lock, flags);
/* Nothing should be outstanding at this point so just
* free them
*/
- for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p2->next) {
- kfree(p1);
+ for(p1 = adpt_post_wait_queue; p1;) {
+ old = p1;
+ p1 = p1->next;
+ kfree(old);
}
// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
adpt_post_wait_queue = NULL;
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
index 3afd1ee..4952b66 100644
--- a/drivers/video/console/fbcon_ccw.c
+++ b/drivers/video/console/fbcon_ccw.c
@@ -34,7 +34,7 @@
msk <<= (8 - mod);
if (offset > mod)
- set_bit(FBCON_BIT(7), (void *)&msk1);
+ msk1 |= 0x01;
for (i = 0; i < vc->vc_font.width; i++) {
for (j = 0; j < width; j++) {
diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/console/fbcon_rotate.h
index e504fbf..1b8f92f 100644
--- a/drivers/video/console/fbcon_rotate.h
+++ b/drivers/video/console/fbcon_rotate.h
@@ -21,21 +21,13 @@
(s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
(i)->var.xres : (i)->var.xres_virtual; })
-/*
- * The bitmap is always big endian
- */
-#if defined(__LITTLE_ENDIAN)
-#define FBCON_BIT(b) (7 - (b))
-#else
-#define FBCON_BIT(b) (b)
-#endif
static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat)
{
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
pat +=index;
- return (test_bit(FBCON_BIT(bit), (void *)pat));
+ return (*pat) & (0x80 >> bit);
}
static inline void pattern_set_bit(u32 x, u32 y, u32 pitch, char *pat)
@@ -43,7 +35,8 @@
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
pat += index;
- set_bit(FBCON_BIT(bit), (void *)pat);
+
+ (*pat) |= 0x80 >> bit;
}
static inline void rotate_ud(const char *in, char *out, u32 width, u32 height)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index be72881..0ea965c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -427,6 +427,8 @@
v9fs_mistat2inode(fcall->params.rstat.stat, file_inode, sb);
kfree(fcall);
+ fcall = NULL;
+ file_dentry->d_op = &v9fs_dentry_operations;
d_instantiate(file_dentry, file_inode);
if (perm & V9FS_DMDIR) {
diff --git a/fs/dquot.c b/fs/dquot.c
index 05b6028..2a62b3d 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1513,10 +1513,16 @@
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ if (!dentry->d_inode) {
+ error = -ENOENT;
+ goto out;
+ }
+
error = security_quota_on(dentry);
if (!error)
error = vfs_quota_on_inode(dentry->d_inode, type, format_id);
+out:
dput(dentry);
return error;
}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 1be78b4b..6104ad3 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -767,6 +767,7 @@
if (input->group != EXT3_SB(sb)->s_groups_count) {
ext3_warning(sb, __FUNCTION__,
"multiple resizers run on filesystem!\n");
+ err = -EBUSY;
goto exit_journal;
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c045cc7..51f5da6 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -74,6 +74,24 @@
return 1;
}
+static int dir_alias(struct inode *inode)
+{
+ if (S_ISDIR(inode->i_mode)) {
+ /* Don't allow creating an alias to a directory */
+ struct dentry *alias = d_find_alias(inode);
+ if (alias) {
+ dput(alias);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline int invalid_nodeid(u64 nodeid)
+{
+ return !nodeid || nodeid == FUSE_ROOT_ID;
+}
+
static struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
};
@@ -97,7 +115,7 @@
fuse_lookup_init(req, dir, entry, &outarg);
request_send(fc, req);
err = req->out.h.error;
- if (!err && (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID))
+ if (!err && invalid_nodeid(outarg.nodeid))
err = -EIO;
if (!err) {
inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
@@ -193,7 +211,7 @@
}
err = -EIO;
- if (!S_ISREG(outentry.attr.mode))
+ if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
goto out_free_ff;
inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
@@ -250,7 +268,7 @@
fuse_put_request(fc, req);
return err;
}
- if (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID) {
+ if (invalid_nodeid(outarg.nodeid)) {
fuse_put_request(fc, req);
return -EIO;
}
@@ -263,7 +281,7 @@
fuse_put_request(fc, req);
/* Don't allow userspace to do really stupid things... */
- if ((inode->i_mode ^ mode) & S_IFMT) {
+ if (((inode->i_mode ^ mode) & S_IFMT) || dir_alias(inode)) {
iput(inode);
return -EIO;
}
@@ -874,14 +892,9 @@
err = fuse_lookup_iget(dir, entry, &inode);
if (err)
return ERR_PTR(err);
- if (inode && S_ISDIR(inode->i_mode)) {
- /* Don't allow creating an alias to a directory */
- struct dentry *alias = d_find_alias(inode);
- if (alias) {
- dput(alias);
- iput(inode);
- return ERR_PTR(-EIO);
- }
+ if (inode && dir_alias(inode)) {
+ iput(inode);
+ return ERR_PTR(-EIO);
}
d_add(entry, inode);
return NULL;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6391d89..aaab1a5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -643,14 +643,11 @@
/*
* Invalidate the local caches
*/
-void
-nfs_zap_caches(struct inode *inode)
+static void nfs_zap_caches_locked(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
int mode = inode->i_mode;
- spin_lock(&inode->i_lock);
-
NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode);
NFS_ATTRTIMEO_UPDATE(inode) = jiffies;
@@ -659,7 +656,12 @@
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
else
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+}
+void nfs_zap_caches(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ nfs_zap_caches_locked(inode);
spin_unlock(&inode->i_lock);
}
@@ -676,16 +678,13 @@
}
/*
- * Invalidate, but do not unhash, the inode
+ * Invalidate, but do not unhash, the inode.
+ * NB: must be called with inode->i_lock held!
*/
-static void
-nfs_invalidate_inode(struct inode *inode)
+static void nfs_invalidate_inode(struct inode *inode)
{
- umode_t save_mode = inode->i_mode;
-
- make_bad_inode(inode);
- inode->i_mode = save_mode;
- nfs_zap_caches(inode);
+ set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
+ nfs_zap_caches_locked(inode);
}
struct nfs_find_desc {
@@ -1528,14 +1527,13 @@
printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n",
__FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode);
#endif
+ out_err:
/*
* No need to worry about unhashing the dentry, as the
* lookup validation will know that the inode is bad.
* (But we fall through to invalidate the caches.)
*/
nfs_invalidate_inode(inode);
- out_err:
- set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
return -ESTALE;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 21482b2..60e0dd8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3071,15 +3071,15 @@
struct nfs4_client *clp = state->owner->so_client;
int status;
- down_read(&clp->cl_sem);
/* Is this a delegated open? */
- if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+ if (NFS_I(state->inode)->delegation_state != 0) {
/* Yes: cache locks! */
status = do_vfs_lock(request->fl_file, request);
/* ...but avoid races with delegation recall... */
if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags))
- goto out;
+ return status;
}
+ down_read(&clp->cl_sem);
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0675f32..5ef4c57 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -644,12 +644,15 @@
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
+ struct rpc_sequence *sequence = counter->sequence;
struct nfs_seqid *new;
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (new != NULL) {
new->sequence = counter;
- INIT_LIST_HEAD(&new->list);
+ spin_lock(&sequence->lock);
+ list_add_tail(&new->list, &sequence->list);
+ spin_unlock(&sequence->lock);
}
return new;
}
@@ -658,12 +661,10 @@
{
struct rpc_sequence *sequence = seqid->sequence->sequence;
- if (!list_empty(&seqid->list)) {
- spin_lock(&sequence->lock);
- list_del(&seqid->list);
- spin_unlock(&sequence->lock);
- }
- rpc_wake_up_next(&sequence->wait);
+ spin_lock(&sequence->lock);
+ list_del(&seqid->list);
+ spin_unlock(&sequence->lock);
+ rpc_wake_up(&sequence->wait);
kfree(seqid);
}
@@ -722,11 +723,10 @@
if (sequence->list.next == &seqid->list)
goto out;
spin_lock(&sequence->lock);
- if (!list_empty(&sequence->list)) {
+ if (sequence->list.next != &seqid->list) {
rpc_sleep_on(&sequence->wait, task, NULL, NULL);
status = -EAGAIN;
- } else
- list_add(&seqid->list, &sequence->list);
+ }
spin_unlock(&sequence->lock);
out:
return status;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9ab97ce..50bd5a8f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -402,12 +402,11 @@
/*
* Calculate numa node maps for a vma
*/
-static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
{
+ int i;
struct page *page;
unsigned long vaddr;
- struct mm_struct *mm = vma->vm_mm;
- int i;
struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
if (!md)
@@ -420,7 +419,7 @@
md->node[i] =0;
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- page = follow_page(mm, vaddr, 0);
+ page = follow_page(vma, vaddr, 0);
if (page) {
int count = page_mapcount(page);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 5f82352..0a044ad 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2194,7 +2194,7 @@
INITIALIZE_PATH(path);
int pos_in_item;
int jbegin_count = JOURNAL_PER_BALANCE_CNT;
- loff_t byte_offset = (block << inode->i_sb->s_blocksize_bits) + 1;
+ loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
int retval;
int use_get_block = 0;
int bytes_copied = 0;
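The cast above fixes a 32-bit truncation: on a 32-bit kernel, block << s_blocksize_bits is computed in 32 bits, so the high bits are lost before the result reaches the 64-bit byte_offset. A minimal demonstration (assumed values):

    unsigned long block = 0x00200000UL;   /* block number 2^21 */
    int bits = 12;                        /* 4KB blocks */
    loff_t wrong = block << bits;         /* 32-bit shift wraps to 0 */
    loff_t right = (loff_t)block << bits; /* 0x200000000 = 8GB, correct */

The fs/xfs/linux-2.6/xfs_aops.c hunks below apply the same (xfs_off_t)blocks << inode->i_blkbits promotion for the same reason.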
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c610897..94d3cdf 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -941,13 +941,12 @@
int retpbbm = 1;
int error;
- if (blocks) {
- offset = blocks << inode->i_blkbits; /* 64 bit goodness */
- size = (ssize_t) min_t(xfs_off_t, offset, LONG_MAX);
- } else {
- size = 1 << inode->i_blkbits;
- }
offset = (xfs_off_t)iblock << inode->i_blkbits;
+ if (blocks)
+ size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
+ (xfs_off_t)blocks << inode->i_blkbits);
+ else
+ size = 1 << inode->i_blkbits;
VOP_BMAP(vp, offset, size,
create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
@@ -1007,7 +1006,7 @@
ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
offset = min_t(xfs_off_t,
iomap.iomap_bsize - iomap.iomap_delta,
- blocks << inode->i_blkbits);
+ (xfs_off_t)blocks << inode->i_blkbits);
bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
}
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 35e557b..1c74218 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -310,7 +310,8 @@
* Fix up the start offset of the attribute fork
*/
totsize -= size;
- if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname) {
+ if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
+ !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
/*
* Last attribute now removed, revert to original
* inode format making all literal area available
@@ -328,7 +329,8 @@
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
ASSERT(dp->i_d.di_forkoff);
- ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname);
+ ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
+ (mp->m_flags & XFS_MOUNT_COMPAT_ATTR));
dp->i_afp->if_ext_max =
XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
dp->i_df.if_ext_max =
@@ -737,7 +739,8 @@
+ name_loc->namelen
+ INT_GET(name_loc->valuelen, ARCH_CONVERT);
}
- if (bytes == sizeof(struct xfs_attr_sf_hdr))
+ if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+ (bytes == sizeof(struct xfs_attr_sf_hdr)))
return(-1);
return(xfs_attr_shortform_bytesfit(dp, bytes));
}
@@ -775,6 +778,8 @@
goto out;
if (forkoff == -1) {
+ ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR));
+
/*
* Last attribute was removed, revert to original
* inode format making all literal area available
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 7ceabd0..d1236d6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -550,7 +550,7 @@
struct vfs *vfsp = XFS_MTOVFS(mp);
struct super_block *sb = freeze_bdev(vfsp->vfs_super->s_bdev);
- if (sb) {
+ if (sb && !IS_ERR(sb)) {
xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
thaw_bdev(sb->s_bdev, sb);
}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index fcd6d63..3ce204a 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -69,7 +69,7 @@
xfs_buftarg_t *iomap_target;
xfs_off_t iomap_offset; /* offset of mapping, bytes */
xfs_off_t iomap_bsize; /* size of mapping, bytes */
- size_t iomap_delta; /* offset into mapping, bytes */
+ xfs_off_t iomap_delta; /* offset into mapping, bytes */
iomap_flags_t iomap_flags;
} xfs_iomap_t;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 8f28514..4518b18 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -494,10 +494,8 @@
#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
-#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \
- xlog_grant_sub_space(log,bytes,type)
-static inline void xlog_grant_sub_space(struct log *log, int bytes, int type)
-{
+#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \
+ { \
if (type == 'w') { \
(log)->l_grant_write_bytes -= (bytes); \
if ((log)->l_grant_write_bytes < 0) { \
@@ -511,13 +509,9 @@
(log)->l_grant_reserve_cycle--; \
} \
} \
-}
-
-#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \
- xlog_grant_add_space(log,bytes,type)
-static inline void
-xlog_grant_add_space(struct log *log, int bytes, int type)
-{
+ }
+#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \
+ { \
if (type == 'w') { \
(log)->l_grant_write_bytes += (bytes); \
if ((log)->l_grant_write_bytes > (log)->l_logsize) { \
@@ -531,12 +525,9 @@
(log)->l_grant_reserve_cycle++; \
} \
} \
-}
-
-#define XLOG_INS_TICKETQ(q, tic) xlog_ins_ticketq(q, tic)
-static inline void
-xlog_ins_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic)
-{ \
+ }
+#define XLOG_INS_TICKETQ(q, tic) \
+ { \
if (q) { \
(tic)->t_next = (q); \
(tic)->t_prev = (q)->t_prev; \
@@ -547,12 +538,9 @@
(q) = (tic); \
} \
(tic)->t_flags |= XLOG_TIC_IN_Q; \
-}
-
-#define XLOG_DEL_TICKETQ(q, tic) xlog_del_ticketq(q, tic)
-static inline void
-xlog_del_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic)
-{ \
+ }
+#define XLOG_DEL_TICKETQ(q, tic) \
+ { \
if ((tic) == (tic)->t_next) { \
(q) = NULL; \
} else { \
@@ -562,7 +550,7 @@
} \
(tic)->t_next = (tic)->t_prev = NULL; \
(tic)->t_flags &= ~XLOG_TIC_IN_Q; \
-}
+ }
/* common routines */
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 7c1f745..e03fa2a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -3958,8 +3958,9 @@
}
}
XFS_MOUNT_IUNLOCK(mp);
- xfs_finish_reclaim(ip, noblock,
- XFS_IFLUSH_DELWRI_ELSE_ASYNC);
+ if (xfs_finish_reclaim(ip, noblock,
+ XFS_IFLUSH_DELWRI_ELSE_ASYNC))
+ delay(1);
purged = 1;
break;
}
diff --git a/include/asm-frv/hardirq.h b/include/asm-frv/hardirq.h
index 5248ca0..6851239 100644
--- a/include/asm-frv/hardirq.h
+++ b/include/asm-frv/hardirq.h
@@ -14,6 +14,7 @@
#include <linux/config.h>
#include <linux/threads.h>
+#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
diff --git a/include/asm-frv/ide.h b/include/asm-frv/ide.h
index f9caecf..ae031ea 100644
--- a/include/asm-frv/ide.h
+++ b/include/asm-frv/ide.h
@@ -33,10 +33,10 @@
/*
* some bits needed for parts of the IDE subsystem to compile
*/
-#define __ide_mm_insw(port, addr, n) insw(port, addr, n)
-#define __ide_mm_insl(port, addr, n) insl(port, addr, n)
-#define __ide_mm_outsw(port, addr, n) outsw(port, addr, n)
-#define __ide_mm_outsl(port, addr, n) outsl(port, addr, n)
+#define __ide_mm_insw(port, addr, n) insw((unsigned long) (port), addr, n)
+#define __ide_mm_insl(port, addr, n) insl((unsigned long) (port), addr, n)
+#define __ide_mm_outsw(port, addr, n) outsw((unsigned long) (port), addr, n)
+#define __ide_mm_outsl(port, addr, n) outsl((unsigned long) (port), addr, n)
#endif /* __KERNEL__ */
diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h
index 4feba56..b8221b6 100644
--- a/include/asm-frv/page.h
+++ b/include/asm-frv/page.h
@@ -47,8 +47,8 @@
#define devmem_is_allowed(pfn) 1
-#define __pa(vaddr) virt_to_phys((void *) vaddr)
-#define __va(paddr) phys_to_virt((unsigned long) paddr)
+#define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr))
+#define __va(paddr) phys_to_virt((unsigned long) (paddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index b183962..907c5c3 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -20,7 +20,7 @@
#include <linux/spinlock.h>
#include <linux/rwsem.h>
-#define SEMAPHORE_DEBUG WAITQUEUE_DEBUG
+#define SEMAPHORE_DEBUG 0
/*
* the semaphore definition
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index c8cba78..60f6b2a 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -58,7 +58,7 @@
#endif
-#define PREEMPT_ACTIVE 0x4000000
+#define PREEMPT_ACTIVE 0x10000000
/*
* macros/functions for gaining access to the thread information structure
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index bfff69a..ef1fb8e 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -242,6 +242,27 @@
*/
#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
{
unsigned long flags;
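atomic_add_unless() above is the standard cmpxchg retry loop: read the counter, attempt the exchange, and retry if another CPU changed the value in between; atomic_inc_not_zero() is its most common consumer. A usage sketch (hypothetical refcount helper):

    /* Take a reference only if the object is still live (count != 0). */
    static int obj_tryget(atomic_t *refcount)
    {
        return atomic_inc_not_zero(refcount);
    }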
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h
index 194393b..f7aa969 100644
--- a/include/asm-m32r/ide.h
+++ b/include/asm-m32r/ide.h
@@ -25,20 +25,23 @@
# endif
#endif
-#if defined(CONFIG_PLAT_M32700UT)
-#include <asm/irq.h>
-#include <asm/m32700ut/m32700ut_pld.h>
-#endif
+#include <asm/m32r.h>
+
#define IDE_ARCH_OBSOLETE_DEFAULTS
static __inline__ int ide_default_irq(unsigned long base)
{
switch (base) {
-#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3)
+#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2)
case 0x1f0: return PLD_IRQ_CFIREQ;
default:
return 0;
+#elif defined(CONFIG_PLAT_MAPPI3)
+ case 0x1f0: return PLD_IRQ_CFIREQ;
+ case 0x170: return PLD_IRQ_IDEIREQ;
+ default:
+ return 0;
#else
case 0x1f0: return 14;
case 0x170: return 15;
diff --git a/include/asm-m32r/mappi3/mappi3_pld.h b/include/asm-m32r/mappi3/mappi3_pld.h
index 3f1551f..1d3c25d 100644
--- a/include/asm-m32r/mappi3/mappi3_pld.h
+++ b/include/asm-m32r/mappi3/mappi3_pld.h
@@ -59,7 +59,7 @@
#define M32R_IRQ_I2C (28) /* I2C-BUS */
#define PLD_IRQ_CFIREQ (6) /* INT5 CFC Card Interrupt */
#define PLD_IRQ_CFC_INSERT (7) /* INT6 CFC Card Insert */
-#define PLD_IRQ_CFC_EJECT (8) /* INT7 CFC Card Eject */
+#define PLD_IRQ_IDEIREQ (8) /* INT7 IDE Interrupt */
#define PLD_IRQ_MMCCARD (43) /* MMC Card Insert */
#define PLD_IRQ_MMCIRQ (44) /* MMC Transfer Done */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 73348c3..5eee832 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -11,6 +11,7 @@
*/
#include <linux/config.h>
+#include <asm/assembler.h>
#ifdef __KERNEL__
@@ -132,8 +133,6 @@
!(flags & 0x40); \
})
-#endif /* __KERNEL__ */
-
#define nop() __asm__ __volatile__ ("nop" : : )
#define xchg(ptr,x) \
@@ -213,6 +212,67 @@
return (tmp);
}
+#define __HAVE_ARCH_CMPXCHG 1
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
+{
+ unsigned long flags;
+ unsigned int retval;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ DCACHE_CLEAR("%0", "r4", "%1")
+ M32R_LOCK" %0, @%1; \n"
+ " bne %0, %2, 1f; \n"
+ M32R_UNLOCK" %3, @%1; \n"
+ " bra 2f; \n"
+ " .fillinsn \n"
+ "1:"
+ M32R_UNLOCK" %2, @%1; \n"
+ " .fillinsn \n"
+ "2:"
+ : "=&r" (retval)
+ : "r" (p), "r" (old), "r" (new)
+ : "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+#if 0 /* we don't have __cmpxchg_u64 */
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+#endif /* 0 */
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#endif /* __KERNEL__ */
+
/*
* Memory barrier.
*
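__cmpxchg_u32() above builds compare-and-swap from the M32R LOCK/UNLOCK instruction pair, with local_irq_save() covering the window; it is what the new atomic_cmpxchg()/atomic_add_unless() in asm-m32r/atomic.h rely on. The canonical retry loop it enables, sketched:

    /* Lock-free increment built on cmpxchg() (illustration only). */
    static void lockfree_inc(volatile unsigned int *p)
    {
        unsigned int old;

        do {
            old = *p;
        } while (cmpxchg(p, old, old + 1) != old);
    }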
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index 6a35e65..f89f060 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Rewrite, cleanup:
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 58a3dd9..6642c01 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -103,8 +103,9 @@
#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
-#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- - (1U << GET_ESID(addr))) & 0xffff)
+#define LOW_ESID_MASK(addr, len) \
+ (((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
+ - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- (1U << GET_HTLB_AREA(addr))) & 0xffff)
@@ -113,17 +114,21 @@
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define touches_hugepage_low_range(mm, addr, len) \
- (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+ (((addr) < 0x100000000UL) \
+ && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
#define touches_hugepage_high_range(mm, addr, len) \
- (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
+ ((((addr) + (len)) > 0x100000000UL) \
+ && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))
#define __within_hugepage_low_range(addr, len, segmask) \
- ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
+ ( (((addr)+(len)) <= 0x100000000UL) \
+ && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
#define within_hugepage_low_range(addr, len) \
__within_hugepage_low_range((addr), (len), \
current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
- ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+ ( ((addr) >= 0x100000000UL) \
+ && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
#define within_hugepage_high_range(addr, len) \
__within_hugepage_high_range((addr), (len), \
current->mm->context.high_htlb_areas)
diff --git a/include/asm-powerpc/tce.h b/include/asm-powerpc/tce.h
index d099d52..980a094 100644
--- a/include/asm-powerpc/tce.h
+++ b/include/asm-powerpc/tce.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Rewrite, cleanup:
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 9a02879b..f0a9b44 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -348,16 +348,6 @@
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-/* Make a non-present pseudo-TTE. */
-static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
-{
- pte_t pte;
- pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
- ~(unsigned long)_PAGE_CACHE);
- pte_val(pte) |= (((unsigned long)space) << 32);
- return pte;
-}
-
/* Encode and de-code a swap entry */
#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 43c4453..0ed1d48 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -65,10 +65,9 @@
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
-extern struct semaphore cpucontrol;
-#define lock_cpu_hotplug() down(&cpucontrol)
-#define unlock_cpu_hotplug() up(&cpucontrol)
-#define lock_cpu_hotplug_interruptible() down_interruptible(&cpucontrol)
+extern void lock_cpu_hotplug(void);
+extern void unlock_cpu_hotplug(void);
+extern int lock_cpu_hotplug_interruptible(void);
#define hotcpu_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
{ .notifier_call = fn, .priority = pri }; \
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 9a42438..dc4081b 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -85,7 +85,6 @@
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-extern struct sysdev_class memory_sysdev_class;
#endif /* CONFIG_MEMORY_HOTPLUG */
#define hotplug_memory_notifier(fn, pri) { \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0cdfd1..6a75a7a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,7 +145,7 @@
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
#define VM_GROWSUP 0x00000200
#define VM_SHM 0x00000000 /* Means nothing: delete it later */
-#define VM_UNPAGED 0x00000400 /* Pages managed without map count */
+#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_EXECUTABLE 0x00001000
@@ -664,6 +664,7 @@
unsigned long truncate_count; /* Compare vm_truncate_count */
};
+struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -953,7 +954,7 @@
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
-struct page *follow_page(struct mm_struct *, unsigned long address,
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 35b30e6..33261f1 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,7 +89,7 @@
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked, int ignore_token);
+int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *);
/*
@@ -109,7 +109,7 @@
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-#define page_referenced(page,l,i) TestClearPageReferenced(page)
+#define page_referenced(page,l) TestClearPageReferenced(page)
#define try_to_unmap(page) SWAP_FAIL
#endif /* CONFIG_MMU */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2038bd2..b0ad6f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -908,7 +908,6 @@
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
-#define PF_HOTPLUG_CPU 0x01000000 /* Currently performing CPU hotplug */
/*
* Only the _current_ task can read/write to tsk->flags, but other
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 20c9756..508668f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -239,6 +239,11 @@
__put_swap_token(mm);
}
+static inline void disable_swap_token(void)
+{
+ put_swap_token(swap_token_mm);
+}
+
#else /* CONFIG_SWAP */
#define total_swap_pages 0
@@ -283,6 +288,7 @@
#define put_swap_token(x) do { } while(0)
#define grab_swap_token() do { } while(0)
#define has_swap_token(x) 0
+#define disable_swap_token() do { } while(0)
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
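The new disable_swap_token() helper works because __put_swap_token() only releases the token when it is handed the current holder; passing swap_token_mm therefore always drops it. A minimal userspace model of that ownership rule (plain C stand-ins, no kernel locking):

#include <stdio.h>

struct mm_model { const char *name; };

static struct mm_model init_mm = { "init_mm" };
static struct mm_model *swap_token_mm = &init_mm;

/* Model of __put_swap_token(): only the current holder is released. */
static void put_swap_token(struct mm_model *mm)
{
        if (mm == swap_token_mm)
                swap_token_mm = &init_mm;
}

/* Passing the holder itself therefore always releases the token. */
static void disable_swap_token(void)
{
        put_swap_token(swap_token_mm);
}

int main(void)
{
        struct mm_model task_mm = { "task" };

        swap_token_mm = &task_mm;
        disable_swap_token();
        printf("holder: %s\n", swap_token_mm->name); /* back to init_mm */
        return 0;
}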
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88..e882c6b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
#include <asm/semaphore.h>
/* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
static struct notifier_block *cpu_chain;
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
{
- return (current->flags & PF_HOTPLUG_CPU);
+ int ret = 0;
+
+ if (lock_cpu_hotplug_owner != current) {
+ if (interruptible)
+ ret = down_interruptible(&cpucontrol);
+ else
+ down(&cpucontrol);
+ }
+
+ /*
+ * Set only if we succeed in locking
+ */
+ if (!ret) {
+ lock_cpu_hotplug_depth++;
+ lock_cpu_hotplug_owner = current;
+ }
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+void lock_cpu_hotplug(void)
+{
+ __lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+void unlock_cpu_hotplug(void)
+{
+ if (--lock_cpu_hotplug_depth == 0) {
+ lock_cpu_hotplug_owner = NULL;
+ up(&cpucontrol);
+ }
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+ return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret;
ret = notifier_chain_register(&cpu_chain, nb);
- up(&cpucontrol);
+ unlock_cpu_hotplug();
return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
- down(&cpucontrol);
+ lock_cpu_hotplug();
notifier_chain_unregister(&cpu_chain, nb);
- up(&cpucontrol);
+ unlock_cpu_hotplug();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
@@ -112,13 +141,6 @@
goto out;
}
- /*
- * Leave a trace in current->flags indicating we are already in
- * process of performing CPU hotplug. Callers can check if cpucontrol
- * is already acquired by current thread, and if so not cause
- * a dead lock by not acquiring the lock
- */
- current->flags |= PF_HOTPLUG_CPU;
err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu);
if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@
out_allowed:
set_cpus_allowed(current, old_allowed);
out:
- current->flags &= ~PF_HOTPLUG_CPU;
unlock_cpu_hotplug();
return err;
}
@@ -182,7 +203,7 @@
int ret;
void *hcpu = (void *)(long)cpu;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret;
if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@
goto out;
}
- /*
- * Leave a trace in current->flags indicating we are already in
- * process of performing CPU hotplug.
- */
- current->flags |= PF_HOTPLUG_CPU;
ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@
if (ret != 0)
notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
- current->flags &= ~PF_HOTPLUG_CPU;
- up(&cpucontrol);
+ unlock_cpu_hotplug();
return ret;
}
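The kernel/cpu.c rework above replaces the PF_HOTPLUG_CPU flag with explicit owner/depth tracking, so a notifier running in the hotplug thread can re-enter lock_cpu_hotplug() without deadlocking. A minimal userspace sketch of the same recursion scheme, with a pthread mutex standing in for the semaphore (the kernel version has its own memory-ordering caveats):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpucontrol = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lock_owner;
static int lock_owner_valid;
static int lock_depth;

static void lock_cpu_hotplug(void)
{
        /* Only take the mutex if we are not already the owner;
         * otherwise just record the recursion. */
        if (!lock_owner_valid || !pthread_equal(lock_owner, pthread_self())) {
                pthread_mutex_lock(&cpucontrol);
                lock_owner = pthread_self();
                lock_owner_valid = 1;
        }
        lock_depth++;
}

static void unlock_cpu_hotplug(void)
{
        /* Release only when the outermost hold is dropped. */
        if (--lock_depth == 0) {
                lock_owner_valid = 0;
                pthread_mutex_unlock(&cpucontrol);
        }
}

int main(void)
{
        lock_cpu_hotplug();
        lock_cpu_hotplug();     /* nested acquire: no deadlock */
        unlock_cpu_hotplug();
        unlock_cpu_hotplug();
        puts("nested lock/unlock ok");
        return 0;
}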
diff --git a/kernel/fork.c b/kernel/fork.c
index 1c1cf8d..fb8572a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1124,8 +1124,6 @@
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);
- cpuset_fork(p);
-
attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@
__get_cpu_var(process_counts)++;
}
- proc_fork_connector(p);
if (!current->signal->tty && p->signal->tty)
p->signal->tty = NULL;
nr_threads++;
total_forks++;
write_unlock_irq(&tasklist_lock);
+ proc_fork_connector(p);
+ cpuset_fork(p);
retval = 0;
fork_out:
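The fork.c hunk moves proc_fork_connector() and cpuset_fork() out from under write_lock_irq(&tasklist_lock), presumably because those hooks can take sleeping locks, which must not happen inside a spinlocked region. A small sketch of that general pattern, with hypothetical names (notify_fork() stands in for the blocking callbacks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_threads, total_forks;

static void notify_fork(int pid)
{
        /* proc_fork_connector()/cpuset_fork() analogue: may block. */
        printf("notified fork of pid %d\n", pid);
}

static void finish_fork(int pid)
{
        pthread_mutex_lock(&tasklist_lock);
        nr_threads++;           /* fast, non-blocking updates only */
        total_forks++;
        pthread_mutex_unlock(&tasklist_lock);

        notify_fork(pid);       /* blocking work outside the lock */
}

int main(void)
{
        finish_fork(1234);
        printf("threads=%d forks=%d\n", nr_threads, total_forks);
        return 0;
}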
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c..cae4f57 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
- ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+ ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret.cpu = timespec_to_cputime(tp);
}
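The posix-cpu-timers fix widens tv_sec before the multiply: with a 32-bit long, tv_sec * NSEC_PER_SEC wraps once tv_sec exceeds about two seconds. A runnable demonstration, modeling the 32-bit field with unsigned int so the wrap is well-defined and reproduces on 64-bit hosts:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
        unsigned int tv_sec = 5, tv_nsec = 0;

        /* Old code: the multiply happens in 32 bits and wraps. */
        unsigned int bad = tv_sec * (unsigned int)NSEC_PER_SEC + tv_nsec;

        /* Fixed code: widen one operand first, as in the hunk above. */
        unsigned long long good =
                (unsigned long long)tv_sec * NSEC_PER_SEC + tv_nsec;

        printf("wrapped: %u\n", bad);    /* 705032704, not 5e9 */
        printf("widened: %llu\n", good); /* 5000000000 */
        return 0;
}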
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d..2bd5aee 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@
if (!test_and_set_bit(0, &work->pending)) {
if (unlikely(is_single_threaded(wq)))
- cpu = 0;
+ cpu = any_online_cpu(cpu_online_map);
BUG_ON(!list_empty(&work->entry));
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
ret = 1;
@@ -118,7 +118,7 @@
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
- cpu = 0;
+ cpu = any_online_cpu(cpu_online_map);
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -266,8 +266,8 @@
might_sleep();
if (is_single_threaded(wq)) {
- /* Always use cpu 0's area. */
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+ /* Always use first cpu's area. */
+ flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
} else {
int cpu;
@@ -320,7 +320,7 @@
lock_cpu_hotplug();
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
- p = create_workqueue_thread(wq, 0);
+ p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
if (!p)
destroy = 1;
else
@@ -374,7 +374,7 @@
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (is_single_threaded(wq))
- cleanup_workqueue_thread(wq, 0);
+ cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
else {
for_each_online_cpu(cpu)
cleanup_workqueue_thread(wq, cpu);
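Each workqueue hunk replaces a hard-coded CPU 0 with any_online_cpu(cpu_online_map), since with CPU hotplug the boot CPU is not guaranteed to stay online. A rough model of what any_online_cpu() computes, with an unsigned long standing in for cpumask_t:

#include <stdio.h>

/* First set bit in the online mask, or -1 if no CPU is online. */
static int any_online_cpu(unsigned long online_map)
{
        int cpu;

        for (cpu = 0; cpu < (int)(8 * sizeof(online_map)); cpu++)
                if (online_map & (1UL << cpu))
                        return cpu;
        return -1;
}

int main(void)
{
        /* CPU 0 hot-unplugged; CPUs 2 and 3 online. */
        unsigned long online = (1UL << 2) | (1UL << 3);

        /* Single-threaded workqueues must follow the mask, not CPU 0. */
        printf("queue work on cpu %d\n", any_online_cpu(online)); /* 2 */
        return 0;
}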
diff --git a/lib/genalloc.c b/lib/genalloc.c
index d6d30d2e..9ce0a6a 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -95,12 +95,10 @@
if (size > max_chunk_size)
return 0;
- i = 0;
-
size = max(size, 1 << ALLOC_MIN_SHIFT);
- s = roundup_pow_of_two(size);
-
- j = i;
+ i = fls(size - 1);
+ s = 1 << i;
+ j = i -= ALLOC_MIN_SHIFT;
spin_lock_irqsave(&poolp->lock, flags);
while (!h[j].next) {
@@ -153,10 +151,10 @@
if (size > max_chunk_size)
return;
- i = 0;
-
size = max(size, 1 << ALLOC_MIN_SHIFT);
- s = roundup_pow_of_two(size);
+ i = fls(size - 1);
+ s = 1 << i;
+ i -= ALLOC_MIN_SHIFT;
a = ptr;
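The genalloc change folds the old roundup_pow_of_two() call into the bucket-index computation: i = fls(size - 1) is ceil(log2(size)) for size >= 2, and s = 1 << i is size rounded up to a power of two. A worked userspace check (fls() is open-coded here, since libc does not provide the kernel helper):

#include <stdio.h>

/* fls(): position of the most-significant set bit, 1-based
 * (fls(0) == 0), matching the kernel helper's contract. */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int size;

        for (size = 3; size <= 64; size *= 3) {
                int i = fls(size - 1);          /* ceil(log2(size)) */
                unsigned int s = 1U << i;       /* next power of two */

                printf("size=%2u -> i=%d s=%2u\n", size, i, s);
        }
        /* Prints: size= 3 -> i=2 s= 4
         *         size= 9 -> i=4 s=16
         *         size=27 -> i=5 s=32 */
        return 0;
}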
diff --git a/mm/fremap.c b/mm/fremap.c
index 007cbad..f851775 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -27,24 +27,20 @@
struct page *page = NULL;
if (pte_present(pte)) {
- unsigned long pfn = pte_pfn(pte);
- flush_cache_page(vma, addr, pfn);
+ flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, pte, addr);
- goto out;
+ page = vm_normal_page(vma, addr, pte);
+ if (page) {
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+ page_remove_rmap(page);
+ page_cache_release(page);
}
- page = pfn_to_page(pfn);
- if (pte_dirty(pte))
- set_page_dirty(page);
- page_remove_rmap(page);
- page_cache_release(page);
} else {
if (!pte_file(pte))
free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear(mm, addr, ptep);
}
-out:
return !!page;
}
@@ -65,8 +61,6 @@
pte_t pte_val;
spinlock_t *ptl;
- BUG_ON(vma->vm_flags & VM_UNPAGED);
-
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
@@ -122,8 +116,6 @@
pte_t pte_val;
spinlock_t *ptl;
- BUG_ON(vma->vm_flags & VM_UNPAGED);
-
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
diff --git a/mm/madvise.c b/mm/madvise.c
index 328a3bc..2b7cf04 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -126,7 +126,7 @@
unsigned long start, unsigned long end)
{
*prev = vma;
- if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_UNPAGED))
+ if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
return -EINVAL;
if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
diff --git a/mm/memory.c b/mm/memory.c
index d1f46f4..9ab206b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -333,9 +333,9 @@
}
/*
- * This function is called to print an error when a pte in a
- * !VM_UNPAGED region is found pointing to an invalid pfn (which
- * is an error.
+ * This function is called to print an error when a bad pte
+ * is found. For example, we might have a PFN-mapped pte in
+ * a region that doesn't allow it.
*
* The calling function must still handle the error.
*/
@@ -350,19 +350,56 @@
}
/*
- * page_is_anon applies strict checks for an anonymous page belonging to
- * this vma at this address. It is used on VM_UNPAGED vmas, which are
- * usually populated with shared originals (which must not be counted),
- * but occasionally contain private COWed copies (when !VM_SHARED, or
- * perhaps via ptrace when VM_SHARED). An mmap of /dev/mem might window
- * free pages, pages from other processes, or from other parts of this:
- * it's tricky, but try not to be deceived by foreign anonymous pages.
+ * This function gets the "struct page" associated with a pte.
+ *
+ * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
+ * will have each page table entry just pointing to a raw page frame
+ * number, and as far as the VM layer is concerned, those do not have
+ * pages associated with them - even if the PFN might point to memory
+ * that otherwise is perfectly fine and has a "struct page".
+ *
+ * The way we recognize those mappings is through the rules set up
+ * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
+ * and the vm_pgoff will point to the first PFN mapped: thus every
+ * page that is a raw mapping will always honor the rule
+ *
+ * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
+ *
+ * and if that isn't true, the page has been COW'ed (in which case it
+ * _does_ have a "struct page" associated with it even if it is in a
+ * VM_PFNMAP range).
*/
-static inline int page_is_anon(struct page *page,
- struct vm_area_struct *vma, unsigned long addr)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
- return page && PageAnon(page) && page_mapped(page) &&
- page_address_in_vma(page, vma) == addr;
+ unsigned long pfn = pte_pfn(pte);
+
+ if (vma->vm_flags & VM_PFNMAP) {
+ unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ }
+
+ /*
+ * Add some strict sanity checks for now. Eventually,
+ * we should just do "return pfn_to_page(pfn)", but
+ * in the meantime we check that we get a valid pfn,
+ * and that the resulting page looks ok.
+ *
+ * Remove this test eventually!
+ */
+ if (unlikely(!pfn_valid(pfn))) {
+ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page
+ * tables.
+ *
+ * The ZERO_PAGE() pages and various VDSO mappings can
+ * cause them to exist.
+ */
+ return pfn_to_page(pfn);
}
/*
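The identity rule in the comment above is easy to check by hand. A userspace model of the VM_PFNMAP test in vm_normal_page(), with stand-in types (all names here are illustrative, not the kernel's):

#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in for the relevant vm_area_struct fields. */
struct vma_model {
        unsigned long vm_start;
        unsigned long vm_pgoff;         /* first pfn of the raw mapping */
};

/* A pte whose pfn obeys the identity is a raw frame with no
 * struct page; a mismatch means the page was COW'ed and is a
 * normal page after all. */
static int is_raw_pfn(const struct vma_model *vma, unsigned long addr,
                      unsigned long pfn)
{
        unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

        return pfn == vma->vm_pgoff + off;
}

int main(void)
{
        struct vma_model vma = { .vm_start = 0x40000000UL,
                                 .vm_pgoff = 0x80000UL };
        unsigned long addr = vma.vm_start + (3UL << PAGE_SHIFT);

        printf("raw mapping? %d\n", is_raw_pfn(&vma, addr, 0x80003UL)); /* 1 */
        printf("raw mapping? %d\n", is_raw_pfn(&vma, addr, 0x12345UL)); /* 0 */
        return 0;
}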
@@ -379,7 +416,6 @@
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
- unsigned long pfn;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
@@ -397,22 +433,6 @@
goto out_set_pte;
}
- pfn = pte_pfn(pte);
- page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
-
- if (unlikely(vm_flags & VM_UNPAGED))
- if (!page_is_anon(page, vma, addr))
- goto out_set_pte;
-
- /*
- * If the pte points outside of valid memory but
- * the region is not VM_UNPAGED, we have a problem.
- */
- if (unlikely(!page)) {
- print_bad_pte(vma, pte, addr);
- goto out_set_pte; /* try to do something sane */
- }
-
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
@@ -429,9 +449,13 @@
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
- get_page(page);
- page_dup_rmap(page);
- rss[!!PageAnon(page)]++;
+
+ page = vm_normal_page(vma, addr, pte);
+ if (page) {
+ get_page(page);
+ page_dup_rmap(page);
+ rss[!!PageAnon(page)]++;
+ }
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
@@ -543,7 +567,7 @@
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
- if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) {
+ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
if (!vma->anon_vma)
return 0;
}
@@ -584,19 +608,10 @@
}
if (pte_present(ptent)) {
struct page *page;
- unsigned long pfn;
(*zap_work) -= PAGE_SIZE;
- pfn = pte_pfn(ptent);
- page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
-
- if (unlikely(vma->vm_flags & VM_UNPAGED)) {
- if (!page_is_anon(page, vma, addr))
- page = NULL;
- } else if (unlikely(!page))
- print_bad_pte(vma, ptent, addr);
-
+ page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
@@ -852,7 +867,7 @@
/*
* Do a quick page-table lookup for a single page.
*/
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
{
pgd_t *pgd;
@@ -860,8 +875,8 @@
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
- unsigned long pfn;
struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
@@ -897,11 +912,10 @@
goto unlock;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
- pfn = pte_pfn(pte);
- if (!pfn_valid(pfn))
+ page = vm_normal_page(vma, address, pte);
+ if (unlikely(!page))
goto unlock;
- page = pfn_to_page(pfn);
if (flags & FOLL_GET)
get_page(page);
if (flags & FOLL_TOUCH) {
@@ -974,8 +988,10 @@
return i ? : -EFAULT;
}
if (pages) {
- pages[i] = pte_page(*pte);
- get_page(pages[i]);
+ struct page *page = vm_normal_page(vma, start, *pte);
+ pages[i] = page;
+ if (page)
+ get_page(page);
}
pte_unmap(pte);
if (vmas)
@@ -1010,7 +1026,7 @@
foll_flags |= FOLL_WRITE;
cond_resched();
- while (!(page = follow_page(mm, start, foll_flags))) {
+ while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
ret = __handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
@@ -1214,11 +1230,12 @@
* in 2.6 the LRU scan won't even find its pages, so this
* flag means no more than count its pages in reserved_vm,
* and omit it from core dump, even when VM_IO turned off.
- * VM_UNPAGED tells the core MM not to "manage" these pages
- * (e.g. refcount, mapcount, try to swap them out): in
- * particular, zap_pte_range does not try to free them.
+ * VM_PFNMAP tells the core MM that the base pages are just
+ * raw PFN mappings, and do not have a "struct page" associated
+ * with them.
*/
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+ vma->vm_pgoff = pfn;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
@@ -1273,6 +1290,26 @@
return pte;
}
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+{
+ /*
+ * If the source page was a PFN mapping, we don't have
+ * a "struct page" for it. We do a best-effort copy by
+ * just copying from the original user address. If that
+ * fails, we just zero-fill it. Live with it.
+ */
+ if (unlikely(!src)) {
+ void *kaddr = kmap_atomic(dst, KM_USER0);
+ unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE);
+ if (left)
+ memset(kaddr, 0, PAGE_SIZE);
+ kunmap_atomic(kaddr, KM_USER0);
+ return;
+ }
+ copy_user_highpage(dst, src, va);
+}
+
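cow_user_page() exists because the COW source in a VM_PFNMAP vma may have no struct page to copy from, so the code falls back to copying through the user virtual address and zero-fills if that fails. A userspace sketch of the same fallback shape; copy_from_va() is a hypothetical stand-in for __copy_from_user_inatomic():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Returns the number of bytes left uncopied, i.e. 0 on success. */
static unsigned long copy_from_va(void *dst, const void *va, unsigned long n)
{
        memcpy(dst, va, n);
        return 0;
}

static void cow_page(void *dst, const void *src, const void *va)
{
        if (!src) {
                /* No struct page: best-effort copy from the user
                 * virtual address, zero-fill on failure. */
                if (copy_from_va(dst, va, PAGE_SIZE))
                        memset(dst, 0, PAGE_SIZE);
                return;
        }
        memcpy(dst, src, PAGE_SIZE);    /* normal page-to-page copy */
}

int main(void)
{
        static char user_page[PAGE_SIZE] = "pfn-mapped data";
        static char new_page[PAGE_SIZE];

        cow_page(new_page, NULL, user_page);    /* PFN-map path */
        printf("%s\n", new_page);
        return 0;
}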
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
@@ -1296,28 +1333,13 @@
spinlock_t *ptl, pte_t orig_pte)
{
struct page *old_page, *src_page, *new_page;
- unsigned long pfn = pte_pfn(orig_pte);
pte_t entry;
int ret = VM_FAULT_MINOR;
- if (unlikely(!pfn_valid(pfn))) {
- /*
- * Page table corrupted: show pte and kill process.
- * Or it's an attempt to COW an out-of-map VM_UNPAGED
- * entry, which copy_user_highpage does not support.
- */
- print_bad_pte(vma, orig_pte, address);
- ret = VM_FAULT_OOM;
- goto unlock;
- }
- old_page = pfn_to_page(pfn);
+ old_page = vm_normal_page(vma, address, orig_pte);
src_page = old_page;
-
- if (unlikely(vma->vm_flags & VM_UNPAGED))
- if (!page_is_anon(old_page, vma, address)) {
- old_page = NULL;
- goto gotten;
- }
+ if (!old_page)
+ goto gotten;
if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
int reuse = can_share_swap_page(old_page);
@@ -1351,7 +1373,7 @@
new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!new_page)
goto oom;
- copy_user_highpage(new_page, src_page, address);
+ cow_user_page(new_page, src_page, address);
}
/*
@@ -1812,16 +1834,7 @@
spinlock_t *ptl;
pte_t entry;
- /*
- * A VM_UNPAGED vma will normally be filled with present ptes
- * by remap_pfn_range, and never arrive here; but it might have
- * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
- * It's weird enough handling anon pages in unpaged vmas, we do
- * not want to worry about ZERO_PAGEs too (it may or may not
- * matter if their counts wrap): just give them anon pages.
- */
-
- if (write_access || (vma->vm_flags & VM_UNPAGED)) {
+ if (write_access) {
/* Allocate our own private page. */
pte_unmap(page_table);
@@ -1896,8 +1909,6 @@
int anon = 0;
pte_unmap(page_table);
- BUG_ON(vma->vm_flags & VM_UNPAGED);
-
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
sequence = mapping->truncate_count;
@@ -1930,7 +1941,7 @@
page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!page)
goto oom;
- copy_user_highpage(page, new_page, address);
+ cow_user_page(page, new_page, address);
page_cache_release(new_page);
new_page = page;
anon = 1;
@@ -2149,6 +2160,12 @@
spin_unlock(&mm->page_table_lock);
return 0;
}
+#else
+/* Workaround for gcc 2.96 */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ return 0;
+}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
@@ -2177,6 +2194,12 @@
spin_unlock(&mm->page_table_lock);
return 0;
}
+#else
+/* Workaround for gcc 2.96 */
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ return 0;
+}
#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5609a31..bec88c8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -189,17 +189,15 @@
orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
do {
- unsigned long pfn;
+ struct page *page;
unsigned int nid;
if (!pte_present(*pte))
continue;
- pfn = pte_pfn(*pte);
- if (!pfn_valid(pfn)) {
- print_bad_pte(vma, *pte, addr);
+ page = vm_normal_page(vma, addr, *pte);
+ if (!page)
continue;
- }
- nid = pfn_to_nid(pfn);
+ nid = page_to_nid(page);
if (!node_isset(nid, *nodes))
break;
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -269,8 +267,6 @@
first = find_vma(mm, start);
if (!first)
return ERR_PTR(-EFAULT);
- if (first->vm_flags & VM_UNPAGED)
- return ERR_PTR(-EACCES);
prev = NULL;
for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
if (!vma->vm_next && vma->vm_end < end)
diff --git a/mm/msync.c b/mm/msync.c
index b3f4caf..1b5b6f6 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -27,7 +27,6 @@
again:
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
do {
- unsigned long pfn;
struct page *page;
if (progress >= 64) {
@@ -40,13 +39,9 @@
continue;
if (!pte_maybe_dirty(*pte))
continue;
- pfn = pte_pfn(*pte);
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, *pte, addr);
+ page = vm_normal_page(vma, addr, *pte);
+ if (!page)
continue;
- }
- page = pfn_to_page(pfn);
-
if (ptep_clear_flush_dirty(vma, addr, pte) ||
page_test_and_clear_dirty(page))
set_page_dirty(page);
@@ -97,9 +92,8 @@
/* For hugepages we can't go walking the page table normally,
* but that's ok, hugetlbfs is memory based, so we don't need
* to do anything more on an msync().
- * Can't do anything with VM_UNPAGED regions either.
*/
- if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED))
+ if (vma->vm_flags & VM_HUGETLB)
return;
BUG_ON(addr >= end);
diff --git a/mm/nommu.c b/mm/nommu.c
index 6deb6ab..c119681 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1045,7 +1045,7 @@
EXPORT_SYMBOL(find_vma);
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags)
{
return NULL;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1731236..b257720 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -773,9 +773,12 @@
}
#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
-#define ALLOC_HARDER 0x02 /* try to alloc harder */
-#define ALLOC_HIGH 0x04 /* __GFP_HIGH set */
-#define ALLOC_CPUSET 0x08 /* check for correct cpuset */
+#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
+#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
+#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
+#define ALLOC_HARDER 0x10 /* try to alloc harder */
+#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
/*
* Return 1 if free pages are above 'mark'. This takes into account the order
@@ -830,7 +833,14 @@
continue;
if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
- if (!zone_watermark_ok(*z, order, (*z)->pages_low,
+ unsigned long mark;
+ if (alloc_flags & ALLOC_WMARK_MIN)
+ mark = (*z)->pages_min;
+ else if (alloc_flags & ALLOC_WMARK_LOW)
+ mark = (*z)->pages_low;
+ else
+ mark = (*z)->pages_high;
+ if (!zone_watermark_ok(*z, order, mark,
classzone_idx, alloc_flags))
continue;
}
@@ -871,7 +881,7 @@
}
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
- zonelist, ALLOC_CPUSET);
+ zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
if (page)
goto got_pg;
@@ -888,7 +898,7 @@
* cannot run direct reclaim, or if the caller has realtime scheduling
* policy.
*/
- alloc_flags = 0;
+ alloc_flags = ALLOC_WMARK_MIN;
if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
@@ -959,7 +969,7 @@
* under heavy pressure.
*/
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
- zonelist, ALLOC_CPUSET);
+ zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
if (page)
goto got_pg;
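The page_alloc.c hunks let each call site choose which zone watermark get_page_from_freelist() tests, instead of always using pages_low. A small model of the new flag-to-watermark selection, mirroring the values in the diff; per the hunks above, the fast path uses pages_low, the reclaim retries use pages_min, and the post-OOM-kill retry uses pages_high:

#include <stdio.h>

/* The new flag values from the diff above. */
#define ALLOC_NO_WATERMARKS     0x01
#define ALLOC_WMARK_MIN         0x02
#define ALLOC_WMARK_LOW         0x04
#define ALLOC_WMARK_HIGH        0x08

struct zone_model {
        unsigned long pages_min, pages_low, pages_high;
};

/* Mirrors the selection added to get_page_from_freelist(). */
static unsigned long pick_mark(const struct zone_model *z, int alloc_flags)
{
        if (alloc_flags & ALLOC_WMARK_MIN)
                return z->pages_min;
        else if (alloc_flags & ALLOC_WMARK_LOW)
                return z->pages_low;
        return z->pages_high;
}

int main(void)
{
        struct zone_model z = { .pages_min = 100, .pages_low = 125,
                                .pages_high = 150 };

        printf("low:  %lu\n", pick_mark(&z, ALLOC_WMARK_LOW));
        printf("min:  %lu\n", pick_mark(&z, ALLOC_WMARK_MIN));
        printf("high: %lu\n", pick_mark(&z, ALLOC_WMARK_HIGH));
        return 0;
}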
diff --git a/mm/rmap.c b/mm/rmap.c
index 2e034a0..491ac35 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -226,8 +226,6 @@
/*
* At what user virtual address is page expected in vma? Checking that the
* page matches the vma: currently only used on anon pages, by unuse_vma.
- * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking
- * care that an mmap of /dev/mem might window free and foreign pages.
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
@@ -292,7 +290,7 @@
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
static int page_referenced_one(struct page *page,
- struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
+ struct vm_area_struct *vma, unsigned int *mapcount)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -313,7 +311,7 @@
/* Pretend the page is referenced if the task has the
swap token and is in the middle of a page fault. */
- if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
+ if (mm != current->mm && has_swap_token(mm) &&
rwsem_is_locked(&mm->mmap_sem))
referenced++;
@@ -323,7 +321,7 @@
return referenced;
}
-static int page_referenced_anon(struct page *page, int ignore_token)
+static int page_referenced_anon(struct page *page)
{
unsigned int mapcount;
struct anon_vma *anon_vma;
@@ -336,8 +334,7 @@
mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- referenced += page_referenced_one(page, vma, &mapcount,
- ignore_token);
+ referenced += page_referenced_one(page, vma, &mapcount);
if (!mapcount)
break;
}
@@ -356,7 +353,7 @@
*
* This function is only called from page_referenced for object-based pages.
*/
-static int page_referenced_file(struct page *page, int ignore_token)
+static int page_referenced_file(struct page *page)
{
unsigned int mapcount;
struct address_space *mapping = page->mapping;
@@ -394,8 +391,7 @@
referenced++;
break;
}
- referenced += page_referenced_one(page, vma, &mapcount,
- ignore_token);
+ referenced += page_referenced_one(page, vma, &mapcount);
if (!mapcount)
break;
}
@@ -412,13 +408,10 @@
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
*/
-int page_referenced(struct page *page, int is_locked, int ignore_token)
+int page_referenced(struct page *page, int is_locked)
{
int referenced = 0;
- if (!swap_token_default_timeout)
- ignore_token = 1;
-
if (page_test_and_clear_young(page))
referenced++;
@@ -427,15 +420,14 @@
if (page_mapped(page) && page->mapping) {
if (PageAnon(page))
- referenced += page_referenced_anon(page, ignore_token);
+ referenced += page_referenced_anon(page);
else if (is_locked)
- referenced += page_referenced_file(page, ignore_token);
+ referenced += page_referenced_file(page);
else if (TestSetPageLocked(page))
referenced++;
else {
if (page->mapping)
- referenced += page_referenced_file(page,
- ignore_token);
+ referenced += page_referenced_file(page);
unlock_page(page);
}
}
@@ -614,7 +606,6 @@
struct page *page;
unsigned long address;
unsigned long end;
- unsigned long pfn;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
@@ -643,15 +634,8 @@
for (; address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
-
- pfn = pte_pfn(*pte);
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, *pte, address);
- continue;
- }
-
- page = pfn_to_page(pfn);
- BUG_ON(PageAnon(page));
+ page = vm_normal_page(vma, address, *pte);
+ BUG_ON(!page || PageAnon(page));
if (ptep_clear_flush_young(vma, address, pte))
continue;
diff --git a/mm/thrash.c b/mm/thrash.c
index eff3c18..f4c560b 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -57,14 +57,17 @@
/* We have the token. Let others know we still need it. */
if (has_swap_token(current->mm)) {
current->mm->recent_pagein = 1;
+ if (unlikely(!swap_token_default_timeout))
+ disable_swap_token();
return;
}
if (time_after(jiffies, swap_token_check)) {
- /* Can't get swapout protection if we exceed our RSS limit. */
- // if (current->mm->rss > current->mm->rlimit_rss)
- // return;
+ if (!swap_token_default_timeout) {
+ swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
+ return;
+ }
/* ... or if we recently held the token. */
if (time_before(jiffies, current->mm->swap_token_time))
@@ -95,6 +98,7 @@
{
spin_lock(&swap_token_lock);
if (likely(mm == swap_token_mm)) {
+ mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
swap_token_mm = &init_mm;
swap_token_check = jiffies;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2813054..b0cd81c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -201,13 +201,25 @@
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
+ unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
delta = (4 * scanned) / shrinker->seeks;
- delta *= (*shrinker->shrinker)(0, gfp_mask);
+ delta *= max_pass;
do_div(delta, lru_pages + 1);
shrinker->nr += delta;
- if (shrinker->nr < 0)
- shrinker->nr = LONG_MAX; /* It wrapped! */
+ if (shrinker->nr < 0) {
+ printk(KERN_ERR "%s: nr=%ld\n",
+ __FUNCTION__, shrinker->nr);
+ shrinker->nr = max_pass;
+ }
+
+ /*
+ * Avoid risking looping forever due to too large nr value:
+ * never try to free more than twice the estimate number of
+ * freeable entries.
+ */
+ if (shrinker->nr > max_pass * 2)
+ shrinker->nr = max_pass * 2;
total_scan = shrinker->nr;
shrinker->nr = 0;
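The shrinker change records the shrinker's own estimate (max_pass) and uses it twice: as a saner reset value when nr wraps negative, and as a cap of max_pass * 2 so a huge carried-over backlog cannot keep the scan looping. A compact model of that bookkeeping:

#include <stdio.h>

static long update_shrinker_nr(long nr, long delta, long max_pass)
{
        nr += delta;
        if (nr < 0)
                nr = max_pass;          /* arithmetic wrapped */
        if (nr > max_pass * 2)
                nr = max_pass * 2;      /* cap the backlog */
        return nr;
}

int main(void)
{
        long max_pass = 1000;

        printf("%ld\n", update_shrinker_nr(500, 300, max_pass));  /* 800 */
        printf("%ld\n", update_shrinker_nr(1900, 500, max_pass)); /* capped: 2000 */
        printf("%ld\n", update_shrinker_nr(-3, 1, max_pass));     /* wrapped: 1000 */
        return 0;
}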
@@ -407,7 +419,7 @@
if (PageWriteback(page))
goto keep_locked;
- referenced = page_referenced(page, 1, sc->priority <= 0);
+ referenced = page_referenced(page, 1);
/* In active use or really unfreeable? Activate it. */
if (referenced && page_mapping_inuse(page))
goto activate_locked;
@@ -756,7 +768,7 @@
if (page_mapped(page)) {
if (!reclaim_mapped ||
(total_swap_pages == 0 && PageAnon(page)) ||
- page_referenced(page, 0, sc->priority <= 0)) {
+ page_referenced(page, 0)) {
list_add(&page->lru, &l_active);
continue;
}
@@ -960,6 +972,8 @@
sc.nr_reclaimed = 0;
sc.priority = priority;
sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+ if (!priority)
+ disable_swap_token();
shrink_caches(zones, &sc);
shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
if (reclaim_state) {
@@ -1056,6 +1070,10 @@
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
+ /* The swap token gets in the way of swapout... */
+ if (!priority)
+ disable_swap_token();
+
all_zones_ok = 1;
if (nr_pages == 0) {
@@ -1360,6 +1378,7 @@
sc.nr_reclaimed = 0;
/* scan at the highest priority */
sc.priority = 0;
+ disable_swap_token();
if (nr_pages > SWAP_CLUSTER_MAX)
sc.swap_cluster_max = nr_pages;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 81e00a6..e3b242d 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -39,23 +39,27 @@
#define RPC_UPCALL_TIMEOUT (30*HZ)
static void
+__rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err)
+{
+ struct rpc_pipe_msg *msg;
+ void (*destroy_msg)(struct rpc_pipe_msg *);
+
+ destroy_msg = rpci->ops->destroy_msg;
+ while (!list_empty(head)) {
+ msg = list_entry(head->next, struct rpc_pipe_msg, list);
+ list_del_init(&msg->list);
+ msg->errno = err;
+ destroy_msg(msg);
+ }
+}
+
+static void
__rpc_purge_upcall(struct inode *inode, int err)
{
struct rpc_inode *rpci = RPC_I(inode);
- struct rpc_pipe_msg *msg;
- while (!list_empty(&rpci->pipe)) {
- msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, list);
- list_del_init(&msg->list);
- msg->errno = err;
- rpci->ops->destroy_msg(msg);
- }
- while (!list_empty(&rpci->in_upcall)) {
- msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, list);
- list_del_init(&msg->list);
- msg->errno = err;
- rpci->ops->destroy_msg(msg);
- }
+ __rpc_purge_list(rpci, &rpci->pipe, err);
+ __rpc_purge_list(rpci, &rpci->in_upcall, err);
rpci->pipelen = 0;
wake_up(&rpci->waitq);
}
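The rpc_pipe.c refactor is also a bug fix: the removed second loop tested in_upcall for emptiness but kept dequeuing from rpci->pipe, so a non-empty in_upcall list would spin forever. Factoring the drain into __rpc_purge_list() makes that mismatch impossible. A userspace sketch of the shared-helper shape, with a minimal singly-linked list standing in for the kernel's list_head:

#include <stdio.h>

struct msg {
        struct msg *next;
        int errno_val;
};

/* Drains whichever list head it is given: the emptiness test and the
 * dequeue can no longer refer to different lists. */
static void purge_list(struct msg **head, int err,
                       void (*destroy)(struct msg *))
{
        while (*head) {
                struct msg *m = *head;

                *head = m->next;        /* list_del_init() equivalent */
                m->errno_val = err;
                destroy(m);
        }
}

static void destroy_msg(struct msg *m)
{
        printf("destroyed msg with errno %d\n", m->errno_val);
}

int main(void)
{
        struct msg a = { 0 }, b = { &a };
        struct msg *pipe_head = &b, *upcall_head = NULL;

        purge_list(&pipe_head, -5, destroy_msg);   /* drains both msgs */
        purge_list(&upcall_head, -5, destroy_msg); /* safely empty */
        return 0;
}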