Merge master.kernel.org:/pub/scm/linux/kernel/git/bart/ide-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/bart/ide-2.6:
ide/Kconfig: add missing range check for IDE_MAX_HWIFS
hpt366: fix kernel oops with HPT302N
ide/pci/delkin_cb.c: add new PCI ID
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index a1f1b71..2b030d6 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -758,7 +758,7 @@
NULL, (void *)&pr);
/* Check ACPI support for C3 state */
- if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) {
+ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
cx = &pr->power.states[ACPI_STATE_C3];
if (cx->address > 0 && cx->latency <= 1000) {
longhaul_flags |= USE_ACPI_C3;
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 28c2e2e..656bde2 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -49,7 +49,8 @@
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
- mfc0 t2, CP0_STATUS
+ mfc0 t1, CP0_STATUS
+ sw t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
@@ -59,8 +60,8 @@
lw t3, TASK_THREAD_INFO(a0)
lw t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
- and t1, t0
- beqz t1, 1f
+ and t2, t0, t1
+ beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
@@ -73,13 +74,10 @@
li t1, ~ST0_CU1
and t0, t0, t1
sw t0, ST_OFF(t3)
- /* clear thread_struct CU1 bit */
- and t2, t1
fpu_save_single a0, t0 # clobbers t0
1:
- sw t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index c7698fd..cc566cf 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -48,7 +48,8 @@
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
- mfc0 t2, CP0_STATUS
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
@@ -58,8 +59,8 @@
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
- and t1, t0
- beqz t1, 1f
+ and t2, t0, t1
+ beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
@@ -72,13 +73,10 @@
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
- /* clear thread_struct CU1 bit */
- and t2, t1
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
1:
- LONG_S t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 297dfcb..c0faabd 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -34,4 +34,13 @@
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
+/* Make sure we will not lose FPU ownership */
+#ifdef CONFIG_PREEMPT
+#define lock_fpu_owner() preempt_disable()
+#define unlock_fpu_owner() preempt_enable()
+#else
+#define lock_fpu_owner() pagefault_disable()
+#define unlock_fpu_owner() pagefault_enable()
+#endif
+
#endif /* __SIGNAL_COMMON_H */
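
The CONFIG_PREEMPT split matters because the bracketed region must not
sleep: with preemption enabled, any preemption point could hand the FPU
to another task, while on non-preempt kernels the only way to lose it
mid-copy is a sleeping user-space page fault, so pagefault_disable() is
the cheaper, sufficient guard. A minimal sketch of the intended bracket,
assuming the protected_* helpers introduced in signal.c below:

	lock_fpu_owner();		/* no preemption / no sleeping faults */
	own_fpu_inatomic(1);		/* safe: the FPU cannot be lost here */
	err = save_fp_context(sc);	/* user accesses fail rather than sleep */
	unlock_fpu_owner();
	/* on failure, prefault the sigcontext with __put_user() and retry */
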
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 8c3c5a5..07d6730 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -20,6 +20,7 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
+#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -27,7 +28,6 @@
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
-#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
@@ -78,6 +78,46 @@
/*
* Helper routines
*/
+static int protected_save_fp_context(struct sigcontext __user *sc)
+{
+ int err;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(1);
+ err = save_fp_context(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int protected_restore_fp_context(struct sigcontext __user *sc)
+{
+ int err, tmp;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(0);
+ err = restore_fp_context(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
@@ -113,10 +153,7 @@
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
- own_fpu(1);
- enable_fp_in_kernel();
- err |= save_fp_context(sc);
- disable_fp_in_kernel();
+ err |= protected_save_fp_context(sc);
}
return err;
}
@@ -148,7 +185,7 @@
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
- err |= restore_fp_context(sc);
+ err |= protected_restore_fp_context(sc);
return err ?: sig;
}
@@ -187,11 +224,8 @@
if (used_math) {
/* restore fpu context if we have used it before */
- own_fpu(0);
- enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context(sc);
- disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 151fd2f..b9a0144 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -22,6 +22,7 @@
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
+#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -29,7 +30,6 @@
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
-#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/system.h>
#include <asm/fpu.h>
@@ -176,6 +176,46 @@
/*
* sigcontext handlers
*/
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(1);
+ err = save_fp_context32(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err, tmp;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(0);
+ err = restore_fp_context32(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
@@ -209,10 +249,7 @@
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
- own_fpu(1);
- enable_fp_in_kernel();
- err |= save_fp_context32(sc);
- disable_fp_in_kernel();
+ err |= protected_save_fp_context32(sc);
}
return err;
}
@@ -225,7 +262,7 @@
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
- err |= restore_fp_context32(sc);
+ err |= protected_restore_fp_context32(sc);
return err ?: sig;
}
@@ -261,11 +298,8 @@
if (used_math) {
/* restore fpu context if we have used it before */
- own_fpu(0);
- enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context32(sc);
- disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7d76a85..493cb29 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -650,7 +650,7 @@
unsigned int opcode, bcode;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
@@ -700,7 +700,7 @@
unsigned int opcode, tcode = 0;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
@@ -757,11 +757,12 @@
{
unsigned int cpid;
+ die_if_kernel("do_cpu invoked from kernel context!", regs);
+
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
- die_if_kernel("do_cpu invoked from kernel context!", regs);
if (!cpu_has_llsc)
if (!simulate_llsc(regs))
return;
@@ -772,9 +773,6 @@
break;
case 1:
- if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
- die_if_kernel("do_cpu invoked from kernel context!",
- regs);
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
@@ -782,19 +780,7 @@
set_used_math();
}
- if (raw_cpu_has_fpu) {
- if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
- local_irq_disable();
- if (cpu_has_fpu)
- regs->cp0_status |= ST0_CU1;
- /*
- * We must return without enabling
- * interrupts to ensure keep FPU
- * ownership until resume.
- */
- return;
- }
- } else {
+ if (!raw_cpu_has_fpu) {
int sig;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0);
@@ -836,7 +822,6 @@
case 2:
case 3:
- die_if_kernel("do_cpu invoked from kernel context!", regs);
break;
}
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 87188f0..f4a6169 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -141,6 +141,18 @@
periph_rev = 3;
pass_str = "A2";
break;
+ case K_SYS_REVISION_BCM112x_A3:
+ periph_rev = 3;
+ pass_str = "A3";
+ break;
+ case K_SYS_REVISION_BCM112x_A4:
+ periph_rev = 3;
+ pass_str = "A4";
+ break;
+ case K_SYS_REVISION_BCM112x_B0:
+ periph_rev = 3;
+ pass_str = "B0";
+ break;
default:
printk("Unknown %s rev %x\n", soc_str, soc_pass);
ret = 1;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b6491c0..9e37971 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -986,9 +986,9 @@
* expire an async queue immediately if it has used up its slice. idle
* queue always expire after 1 dispatch round.
*/
- if ((!cfq_cfqq_sync(cfqq) &&
+ if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq)) {
+ cfq_class_idle(cfqq))) {
cfqq->slice_end = jiffies + 1;
cfq_slice_expired(cfqd, 0, 0);
}
@@ -1051,19 +1051,21 @@
while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
int max_dispatch;
- /*
- * Don't repeat dispatch from the previous queue.
- */
- if (prev_cfqq == cfqq)
- break;
+ if (cfqd->busy_queues > 1) {
+ /*
+ * Don't repeat dispatch from the previous queue.
+ */
+ if (prev_cfqq == cfqq)
+ break;
- /*
- * So we have dispatched before in this round, if the
- * next queue has idling enabled (must be sync), don't
- * allow it service until the previous have continued.
- */
- if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
- break;
+ /*
+ * So we have dispatched before in this round, if the
+ * next queue has idling enabled (must be sync), don't
+ * allow it service until the previous have continued.
+ */
+ if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+ break;
+ }
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
@@ -1370,7 +1372,9 @@
atomic_set(&cfqq->ref, 0);
cfqq->cfqd = cfqd;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (key != CFQ_KEY_ASYNC)
+ cfq_mark_cfqq_idle_window(cfqq);
+
cfq_mark_cfqq_prio_changed(cfqq);
cfq_mark_cfqq_queue_new(cfqq);
cfq_init_prio_data(cfqq);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ad2e91b..7975589 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -38,7 +38,6 @@
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
struct page *,
unsigned int, unsigned int);
-static void nfs_mark_request_dirty(struct nfs_page *req);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
@@ -255,7 +254,8 @@
static int nfs_page_mark_flush(struct page *page)
{
struct nfs_page *req;
- spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ spinlock_t *req_lock = &nfsi->req_lock;
int ret;
spin_lock(req_lock);
@@ -279,11 +279,23 @@
return ret;
spin_lock(req_lock);
}
- spin_unlock(req_lock);
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ /* This request is marked for commit */
+ spin_unlock(req_lock);
+ nfs_unlock_request(req);
+ return 1;
+ }
if (nfs_set_page_writeback(page) == 0) {
nfs_list_remove_request(req);
- nfs_mark_request_dirty(req);
- }
+ /* add the request to the inode's dirty list. */
+ radix_tree_tag_set(&nfsi->nfs_page_tree,
+ req->wb_index, NFS_PAGE_TAG_DIRTY);
+ nfs_list_add_request(req, &nfsi->dirty);
+ nfsi->ndirty++;
+ spin_unlock(req_lock);
+ __mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES);
+ } else
+ spin_unlock(req_lock);
ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
nfs_unlock_request(req);
return ret;
@@ -376,6 +388,8 @@
}
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
+ if (PageDirty(req->wb_page))
+ set_bit(PG_NEED_FLUSH, &req->wb_flags);
nfsi->npages++;
atomic_inc(&req->wb_count);
return 0;
@@ -395,6 +409,8 @@
set_page_private(req->wb_page, 0);
ClearPagePrivate(req->wb_page);
radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+ if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+ __set_page_dirty_nobuffers(req->wb_page);
nfsi->npages--;
if (!nfsi->npages) {
spin_unlock(&nfsi->req_lock);
@@ -406,24 +422,6 @@
nfs_release_request(req);
}
-/*
- * Add a request to the inode's dirty list.
- */
-static void
-nfs_mark_request_dirty(struct nfs_page *req)
-{
- struct inode *inode = req->wb_context->dentry->d_inode;
- struct nfs_inode *nfsi = NFS_I(inode);
-
- spin_lock(&nfsi->req_lock);
- radix_tree_tag_set(&nfsi->nfs_page_tree,
- req->wb_index, NFS_PAGE_TAG_DIRTY);
- nfs_list_add_request(req, &nfsi->dirty);
- nfsi->ndirty++;
- spin_unlock(&nfsi->req_lock);
- __mark_inode_dirty(inode, I_DIRTY_PAGES);
-}
-
static void
nfs_redirty_request(struct nfs_page *req)
{
@@ -438,7 +436,7 @@
{
struct page *page = req->wb_page;
- if (page == NULL)
+ if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
return 0;
return !PageWriteback(req->wb_page);
}
@@ -456,10 +454,48 @@
spin_lock(&nfsi->req_lock);
nfs_list_add_request(req, &nfsi->commit);
nfsi->ncommit++;
+ set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
spin_unlock(&nfsi->req_lock);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return data->verf.committed != NFS_FILE_SYNC;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ nfs_mark_request_commit(req);
+ return 1;
+ }
+ if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ nfs_redirty_request(req);
+ return 1;
+ }
+ return 0;
+}
+#else
+static inline void
+nfs_mark_request_commit(struct nfs_page *req)
+{
+}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return 0;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+ return 0;
+}
#endif
/*
@@ -520,6 +556,7 @@
req = nfs_list_entry(head->next);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
nfs_list_remove_request(req);
+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
nfs_inode_remove_request(req);
nfs_unlock_request(req);
}
@@ -746,26 +783,12 @@
static void nfs_writepage_release(struct nfs_page *req)
{
- nfs_end_page_writeback(req->wb_page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (!PageError(req->wb_page)) {
- if (NFS_NEED_RESCHED(req)) {
- nfs_redirty_request(req);
- goto out;
- } else if (NFS_NEED_COMMIT(req)) {
- nfs_mark_request_commit(req);
- goto out;
- }
- }
- nfs_inode_remove_request(req);
-
-out:
- nfs_clear_commit(req);
- nfs_clear_reschedule(req);
-#else
- nfs_inode_remove_request(req);
-#endif
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
+ nfs_end_page_writeback(req->wb_page);
+ nfs_inode_remove_request(req);
+ } else
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
@@ -897,8 +920,8 @@
list_del(&data->pages);
nfs_writedata_release(data);
}
- nfs_end_page_writeback(req->wb_page);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
return -ENOMEM;
}
@@ -943,8 +966,8 @@
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_end_page_writeback(req->wb_page);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
@@ -979,8 +1002,8 @@
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_end_page_writeback(req->wb_page);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
return error;
@@ -1008,22 +1031,28 @@
nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
dprintk(", error = %d\n", task->tk_status);
- } else {
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (data->verf.committed < NFS_FILE_SYNC) {
- if (!NFS_NEED_COMMIT(req)) {
- nfs_defer_commit(req);
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- dprintk(" defer commit\n");
- } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
- nfs_defer_reschedule(req);
- dprintk(" server reboot detected\n");
- }
- } else
-#endif
- dprintk(" OK\n");
+ goto out;
}
+ if (nfs_write_need_commit(data)) {
+ spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+ spin_lock(req_lock);
+ if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ /* Do nothing we need to resend the writes */
+ } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ dprintk(" defer commit\n");
+ } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+ set_bit(PG_NEED_RESCHED, &req->wb_flags);
+ clear_bit(PG_NEED_COMMIT, &req->wb_flags);
+ dprintk(" server reboot detected\n");
+ }
+ spin_unlock(req_lock);
+ } else
+ dprintk(" OK\n");
+
+out:
if (atomic_dec_and_test(&req->wb_complete))
nfs_writepage_release(req);
}
@@ -1064,25 +1093,21 @@
if (task->tk_status < 0) {
nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
- nfs_end_page_writeback(page);
- nfs_inode_remove_request(req);
dprintk(", error = %d\n", task->tk_status);
- goto next;
+ goto remove_request;
}
- nfs_end_page_writeback(page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
- nfs_inode_remove_request(req);
- dprintk(" OK\n");
+ if (nfs_write_need_commit(data)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ nfs_mark_request_commit(req);
+ nfs_end_page_writeback(page);
+ dprintk(" marked for commit\n");
goto next;
}
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req);
- dprintk(" marked for commit\n");
-#else
+ dprintk(" OK\n");
+remove_request:
+ nfs_end_page_writeback(page);
nfs_inode_remove_request(req);
-#endif
next:
nfs_clear_page_writeback(req);
}
@@ -1270,6 +1295,7 @@
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
dprintk("NFS: commit (%s/%Ld %d@%Ld)",
@@ -1505,15 +1531,22 @@
int nfs_set_page_dirty(struct page *page)
{
+ spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
struct nfs_page *req;
+ int ret;
- req = nfs_page_find_request(page);
+ spin_lock(req_lock);
+ req = nfs_page_find_request_locked(page);
if (req != NULL) {
/* Mark any existing write requests for flushing */
- set_bit(PG_NEED_FLUSH, &req->wb_flags);
+ ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+ spin_unlock(req_lock);
nfs_release_request(req);
+ return ret;
}
- return __set_page_dirty_nobuffers(page);
+ ret = __set_page_dirty_nobuffers(page);
+ spin_unlock(req_lock);
+ return ret;
}
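
With this change nfs_set_page_dirty() reports whether the call actually
performed the dirty transition: !test_and_set_bit(PG_NEED_FLUSH, ...) is
1 only for the first dirtier and 0 for repeats, matching the semantics
of __set_page_dirty_nobuffers(). A standalone user-space illustration of
the idiom (this test_and_set_bit is a non-atomic stand-in for the kernel
primitive):

	#include <stdio.h>

	/* Non-atomic user-space stand-in for the kernel's test_and_set_bit(). */
	static int test_and_set_bit(int nr, unsigned long *addr)
	{
		unsigned long mask = 1UL << nr;
		int old = !!(*addr & mask);

		*addr |= mask;
		return old;
	}

	int main(void)
	{
		unsigned long wb_flags = 0;

		/* First dirtier: the bit was clear, a transition happened -> 1. */
		printf("%d\n", !test_and_set_bit(3, &wb_flags));
		/* Repeat dirtier: the bit is already set, no transition -> 0. */
		printf("%d\n", !test_and_set_bit(3, &wb_flags));
		return 0;
	}
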
diff --git a/include/asm-mips/bug.h b/include/asm-mips/bug.h
index 4d560a5..7eb63de 100644
--- a/include/asm-mips/bug.h
+++ b/include/asm-mips/bug.h
@@ -18,7 +18,8 @@
#define BUG_ON(condition) \
do { \
- __asm__ __volatile__("tne $0, %0" : : "r" (condition)); \
+ __asm__ __volatile__("tne $0, %0, %1" \
+ : : "r" (condition), "i" (BRK_BUG)); \
} while (0)
#define HAVE_ARCH_BUG_ON
diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h
index 20a81e1..290485a 100644
--- a/include/asm-mips/checksum.h
+++ b/include/asm-mips/checksum.h
@@ -166,7 +166,7 @@
#else
"r" (proto + len),
#endif
- "r" (sum));
+ "r" ((__force unsigned long)sum));
return sum;
}
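
The cast above is a sparse annotation fix, not a behavioural change:
__wsum is a "bitwise" type, so passing it to an inline-asm "r" operand
degrades it to a plain integer and sparse warns; __force marks that
conversion as intentional. A standalone illustration of the mechanism
(the attributes only bite under sparse's __CHECKER__; plain gcc builds
this as ordinary code, and sum_t is a hypothetical stand-in for __wsum):

	#ifdef __CHECKER__
	# define __bitwise	__attribute__((bitwise))
	# define __force	__attribute__((force))
	#else
	# define __bitwise
	# define __force
	#endif

	typedef unsigned int __bitwise sum_t;

	unsigned long as_plain_integer(sum_t sum)
	{
		/* Without __force, sparse flags this implicit conversion. */
		return (__force unsigned long)sum;
	}
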
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index 4e12d1f..b414a7d 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -68,8 +68,6 @@
/* We don't care about the c0 hazard here */ \
} while (0)
-#define __fpu_enabled() (read_c0_status() & ST0_CU1)
-
#define enable_fpu() \
do { \
if (cpu_has_fpu) \
@@ -102,14 +100,19 @@
set_thread_flag(TIF_USEDFPU);
}
-static inline void own_fpu(int restore)
+static inline void own_fpu_inatomic(int restore)
{
- preempt_disable();
if (cpu_has_fpu && !__is_fpu_owner()) {
__own_fpu();
if (restore)
_restore_fp(current);
}
+}
+
+static inline void own_fpu(int restore)
+{
+ preempt_disable();
+ own_fpu_inatomic(restore);
preempt_enable();
}
@@ -162,18 +165,4 @@
return tsk->thread.fpu.fpr;
}
-static inline void enable_fp_in_kernel(void)
-{
- set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
- /* make sure CU1 and FPU ownership are consistent */
- if (!__is_fpu_owner() && __fpu_enabled())
- __disable_fpu();
-}
-
-static inline void disable_fp_in_kernel(void)
-{
- BUG_ON(!__is_fpu_owner() && __fpu_enabled());
- clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
-}
-
#endif /* _ASM_FPU_H */
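
The refactor keeps own_fpu() as the safe entry point for preemptible
contexts and exposes own_fpu_inatomic() for callers that already hold
the owner lock, avoiding nested preemption bookkeeping. The two calling
conventions, as a sketch based on the definitions in this patch:

	/* From an ordinary preemptible context, e.g. do_cpu(): */
	own_fpu(1);		/* preempt_disable, grab FPU, restore, preempt_enable */

	/* From inside an existing atomic bracket, as in the signal helpers: */
	lock_fpu_owner();
	own_fpu_inatomic(0);	/* the caller already guarantees atomicity */
	err = restore_fp_context(sc);
	unlock_fpu_owner();
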
diff --git a/include/asm-mips/sibyte/sb1250_scd.h b/include/asm-mips/sibyte/sb1250_scd.h
index 7ed0bb6..b6a7d8f 100644
--- a/include/asm-mips/sibyte/sb1250_scd.h
+++ b/include/asm-mips/sibyte/sb1250_scd.h
@@ -84,6 +84,7 @@
#define K_SYS_REVISION_BCM112x_A2 0x21
#define K_SYS_REVISION_BCM112x_A3 0x22
#define K_SYS_REVISION_BCM112x_A4 0x23
+#define K_SYS_REVISION_BCM112x_B0 0x30
#define K_SYS_REVISION_BCM1480_S0 0x01
#define K_SYS_REVISION_BCM1480_A1 0x02
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index 6cf05f4..fbcda82 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -119,7 +119,6 @@
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define TIF_FREEZE 19
-#define TIF_ALLOW_FP_IN_KERNEL 20
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index d111be6..16b0266 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -49,8 +49,6 @@
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-#define NFS_NEED_COMMIT(req) (test_bit(PG_NEED_COMMIT,&(req)->wb_flags))
-#define NFS_NEED_RESCHED(req) (test_bit(PG_NEED_RESCHED,&(req)->wb_flags))
extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
struct inode *inode,
@@ -121,34 +119,6 @@
req->wb_list_head = NULL;
}
-static inline int
-nfs_defer_commit(struct nfs_page *req)
-{
- return !test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags);
-}
-
-static inline void
-nfs_clear_commit(struct nfs_page *req)
-{
- smp_mb__before_clear_bit();
- clear_bit(PG_NEED_COMMIT, &req->wb_flags);
- smp_mb__after_clear_bit();
-}
-
-static inline int
-nfs_defer_reschedule(struct nfs_page *req)
-{
- return !test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags);
-}
-
-static inline void
-nfs_clear_reschedule(struct nfs_page *req)
-{
- smp_mb__before_clear_bit();
- clear_bit(PG_NEED_RESCHED, &req->wb_flags);
- smp_mb__after_clear_bit();
-}
-
static inline struct nfs_page *
nfs_list_entry(struct list_head *head)
{
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 6d7221f..396cdbe 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1046,6 +1046,8 @@
rpc_delay(task, 3*HZ);
case -ETIMEDOUT:
task->tk_action = call_timeout;
+ if (task->tk_client->cl_discrtry)
+ xprt_disconnect(task->tk_xprt);
break;
case -ECONNREFUSED:
case -ENOTCONN:
@@ -1169,6 +1171,8 @@
out_retry:
req->rq_received = req->rq_private_buf.len = 0;
task->tk_status = 0;
+ if (task->tk_client->cl_discrtry)
+ xprt_disconnect(task->tk_xprt);
}
/*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ee6ffa0..456a145 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -735,16 +735,6 @@
xprt_reset_majortimeo(req);
/* Turn off autodisconnect */
del_singleshot_timer_sync(&xprt->timer);
- } else {
- /* If all request bytes have been sent,
- * then we must be retransmitting this one */
- if (!req->rq_bytes_sent) {
- if (task->tk_client->cl_discrtry) {
- xprt_disconnect(xprt);
- task->tk_status = -ENOTCONN;
- return;
- }
- }
}
} else if (!req->rq_bytes_sent)
return;
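
With the transmit-time check removed from xprt_prepare_transmit(), the
"disconnect before retry" policy for cl_discrtry clients now lives
entirely at completion time: the timeout path in call_status() and the
garbage-reply path in call_decode() both drop the transport before the
request is resent. Both call sites share the same two-line shape, which
could be written as a helper (hypothetical name, for illustration only):

	/* Hypothetical helper equivalent to the two clnt.c call sites above. */
	static void rpc_disconnect_on_retry(struct rpc_task *task)
	{
		if (task->tk_client->cl_discrtry)	/* reconnect on retries */
			xprt_disconnect(task->tk_xprt);
	}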