Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
Remove commented-out code copied from NFS
NFS: Switch from intr mount option to TASK_KILLABLE
Add wait_for_completion_killable
Add wait_event_killable
Add schedule_timeout_killable
Use mutex_lock_killable in vfs_readdir
Add mutex_lock_killable
Use lock_page_killable
Add lock_page_killable
Add fatal_signal_pending
Add TASK_WAKEKILL
exit: Use task_is_*
signal: Use task_is_*
sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
ptrace: Use task_is_*
power: Use task_is_*
wait: Use TASK_NORMAL
proc/base.c: Use task_is_*
proc/array.c: Use TASK_REPORT
perfmon: Use task_is_*
...
Fixed up conflicts in NFS/sunrpc manually..
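The recurring transformation in this series: everywhere the old code blocked
interruptibly under a juggled signal mask (so that only SIGKILL, plus a few
extra signals when the "intr" mount option was set, could break the sleep),
the new code blocks in TASK_KILLABLE and lets the scheduler do the filtering.
Schematically, with names taken from the hunks below:

        /* before: block every signal except the "allowed" set */
        rpc_clnt_sigmask(clnt, &oldset);
        error = wait_event_interruptible(wq, condition);
        rpc_clnt_sigunmask(clnt, &oldset);

        /* after: sleep uninterruptibly, except for fatal signals */
        error = wait_event_killable(wq, condition);

Both forms return 0 when the condition came true and -ERESTARTSYS when a
signal ended the wait; the new form needs no per-client cl_intr state and no
sigprocmask round trip on every wait.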
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 73e7c2e..5ae177f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2631,7 +2631,7 @@
*/
if (task == current) return 0;
- if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+ if (!task_is_stopped_or_traced(task)) {
DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
return -EBUSY;
}
@@ -4792,7 +4792,7 @@
* the task must be stopped.
*/
if (PFM_CMD_STOPPED(cmd)) {
- if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+ if (!task_is_stopped_or_traced(task)) {
DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
return -EBUSY;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 34f68f3..81c04ab 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -656,8 +656,7 @@
* wait list.
*/
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
@@ -780,7 +779,7 @@
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
@@ -854,8 +853,7 @@
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
@@ -978,8 +976,7 @@
* wait list (delayed after we release the lock).
*/
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 685c43f..c5c0175 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -386,7 +386,7 @@
if (new)
nfs_free_client(new);
- error = wait_event_interruptible(nfs_client_active_wq,
+ error = wait_event_killable(nfs_client_active_wq,
clp->cl_cons_state != NFS_CS_INITING);
if (error < 0) {
nfs_put_client(clp);
@@ -589,10 +589,6 @@
if (server->flags & NFS_MOUNT_SOFT)
server->client->cl_softrtry = 1;
- server->client->cl_intr = 0;
- if (server->flags & NFS4_MOUNT_INTR)
- server->client->cl_intr = 1;
-
return 0;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f8e165c..16844f9 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -188,17 +188,12 @@
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
ssize_t result = -EIOCBQUEUED;
- struct rpc_clnt *clnt;
- sigset_t oldset;
/* Async requests don't wait here */
if (dreq->iocb)
goto out;
- clnt = NFS_CLIENT(dreq->inode);
- rpc_clnt_sigmask(clnt, &oldset);
- result = wait_for_completion_interruptible(&dreq->completion);
- rpc_clnt_sigunmask(clnt, &oldset);
+ result = wait_for_completion_killable(&dreq->completion);
if (!result)
result = dreq->error;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 3f332e5..966a885 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -433,15 +433,11 @@
*/
static int nfs_wait_on_inode(struct inode *inode)
{
- struct rpc_clnt *clnt = NFS_CLIENT(inode);
struct nfs_inode *nfsi = NFS_I(inode);
- sigset_t oldmask;
int error;
- rpc_clnt_sigmask(clnt, &oldmask);
error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
- nfs_wait_schedule, TASK_INTERRUPTIBLE);
- rpc_clnt_sigunmask(clnt, &oldmask);
+ nfs_wait_schedule, TASK_KILLABLE);
return error;
}
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 8afd9f7..49c7cd0 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -56,7 +56,7 @@
.program = &mnt_program,
.version = version,
.authflavor = RPC_AUTH_UNIX,
- .flags = RPC_CLNT_CREATE_INTR,
+ .flags = 0,
};
struct rpc_clnt *mnt_clnt;
int status;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index b353c1a..549dbce 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -27,17 +27,14 @@
static int
nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
- sigset_t oldset;
int res;
- rpc_clnt_sigmask(clnt, &oldset);
do {
res = rpc_call_sync(clnt, msg, flags);
if (res != -EJUKEBOX)
break;
- schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
+ schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
- } while (!signalled());
- rpc_clnt_sigunmask(clnt, &oldset);
+ } while (!fatal_signal_pending(current));
return res;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5c189bd..027e109 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -316,12 +316,9 @@
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
- sigset_t oldset;
int ret;
- rpc_clnt_sigmask(task->tk_client, &oldset);
ret = rpc_wait_for_completion_task(task);
- rpc_clnt_sigunmask(task->tk_client, &oldset);
return ret;
}
@@ -2785,9 +2782,9 @@
return 0;
}
-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
{
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
@@ -2795,18 +2792,14 @@
static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
{
- sigset_t oldset;
int res;
might_sleep();
rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
- rpc_clnt_sigmask(clnt, &oldset);
res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
- nfs4_wait_bit_interruptible,
- TASK_INTERRUPTIBLE);
- rpc_clnt_sigunmask(clnt, &oldset);
+ nfs4_wait_bit_killable, TASK_KILLABLE);
rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
return res;
@@ -2814,7 +2807,6 @@
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
- sigset_t oldset;
int res = 0;
might_sleep();
@@ -2823,14 +2815,9 @@
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
- rpc_clnt_sigmask(clnt, &oldset);
- if (clnt->cl_intr) {
- schedule_timeout_interruptible(*timeout);
- if (signalled())
- res = -ERESTARTSYS;
- } else
- schedule_timeout_uninterruptible(*timeout);
- rpc_clnt_sigunmask(clnt, &oldset);
+ schedule_timeout_killable(*timeout);
+ if (fatal_signal_pending(current))
+ res = -ERESTARTSYS;
*timeout <<= 1;
return res;
}
@@ -3069,7 +3056,7 @@
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
- schedule_timeout_interruptible(timeout);
+ schedule_timeout_killable(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 4b03345..531379d 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -228,10 +228,7 @@
nfs_data.flags &= ~NFS_MOUNT_SOFT;
break;
case Opt_intr:
- nfs_data.flags |= NFS_MOUNT_INTR;
- break;
case Opt_nointr:
- nfs_data.flags &= ~NFS_MOUNT_INTR;
break;
case Opt_posix:
nfs_data.flags |= NFS_MOUNT_POSIX;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 3b3dbb9..7f07920 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -58,7 +58,6 @@
struct page *page,
unsigned int offset, unsigned int count)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct nfs_page *req;
for (;;) {
@@ -67,7 +66,7 @@
if (req != NULL)
break;
- if (signalled() && (server->flags & NFS_MOUNT_INTR))
+ if (fatal_signal_pending(current))
return ERR_PTR(-ERESTARTSYS);
yield();
}
@@ -177,11 +176,11 @@
kref_put(&req->wb_kref, nfs_free_request);
}
-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
{
int ret = 0;
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
ret = -ERESTARTSYS;
else
schedule();
@@ -192,26 +191,18 @@
* nfs_wait_on_request - Wait for a request to complete.
* @req: request to wait upon.
*
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
* The user is responsible for holding a count on the request.
*/
int
nfs_wait_on_request(struct nfs_page *req)
{
- struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
- sigset_t oldmask;
int ret = 0;
if (!test_bit(PG_BUSY, &req->wb_flags))
goto out;
- /*
- * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
- * are not interrupted if intr flag is not set
- */
- rpc_clnt_sigmask(clnt, &oldmask);
ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
- nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
- rpc_clnt_sigunmask(clnt, &oldmask);
+ nfs_wait_bit_killable, TASK_KILLABLE);
out:
return ret;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22c49c0..7f4505f 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -448,7 +448,6 @@
const char *nostr;
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
- { NFS_MOUNT_INTR, ",intr", ",nointr" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
{ NFS_MOUNT_NONLM, ",nolock", "" },
@@ -708,10 +707,7 @@
mnt->flags &= ~NFS_MOUNT_SOFT;
break;
case Opt_intr:
- mnt->flags |= NFS_MOUNT_INTR;
- break;
case Opt_nointr:
- mnt->flags &= ~NFS_MOUNT_INTR;
break;
case Opt_posix:
mnt->flags |= NFS_MOUNT_POSIX;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5ac5b27..522efff 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -488,7 +488,7 @@
/*
* Wait for a request to complete.
*
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
*/
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
diff --git a/fs/proc/array.c b/fs/proc/array.c
index eb97f28..b380313 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -141,12 +141,7 @@
static inline const char *get_task_state(struct task_struct *tsk)
{
- unsigned int state = (tsk->state & (TASK_RUNNING |
- TASK_INTERRUPTIBLE |
- TASK_UNINTERRUPTIBLE |
- TASK_STOPPED |
- TASK_TRACED)) |
- tsk->exit_state;
+ unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
const char **p = &task_state_array[0];
while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 91fa8e6..9fa9708 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -199,7 +199,7 @@
(task == current || \
(task->parent == current && \
(task->ptrace & PT_PTRACED) && \
- (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+ (task_is_stopped_or_traced(task)) && \
security_ptrace(current,task) == 0))
struct mm_struct *mm_for_maps(struct task_struct *task)
diff --git a/fs/readdir.c b/fs/readdir.c
index efe52e6..4e026e5 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -30,7 +30,10 @@
if (res)
goto out;
- mutex_lock(&inode->i_mutex);
+ res = mutex_lock_killable(&inode->i_mutex);
+ if (res)
+ goto out;
+
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
res = file->f_op->readdir(file, buf, filler);
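mutex_lock_killable() follows the mutex_lock_interruptible() convention and
fails with -EINTR when the waiter receives a fatal signal (see the
kernel/mutex.c hunks below), so the calling pattern used in vfs_readdir()
above generalizes to (illustrative caller, not from the patch):

        res = mutex_lock_killable(&inode->i_mutex);
        if (res)
                return res;     /* -EINTR; the task is already dying */
        /* ... critical section ... */
        mutex_unlock(&inode->i_mutex);

Since only SIGKILL can make the acquisition fail, the -EINTR is never
observed by a process that survives the system call.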
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index ca4b2d5..45f4593 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -105,7 +105,7 @@
if (nfs_try_to_free_pages(server))
continue;
- if (signalled() && (server->flags & NFS_MOUNT_INTR))
+ if (fatal_signal_pending(current))
return ERR_PTR(-ERESTARTSYS);
current->policy = SCHED_YIELD;
schedule();
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33d6aaf..d2961b6 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -44,6 +44,7 @@
extern void wait_for_completion(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_interruptible_timeout(
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 6014797..05c5903 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -125,15 +125,20 @@
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+ unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#else
extern void fastcall mutex_lock(struct mutex *lock);
extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
#endif
/*
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 099ddb4..a69ba80 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -556,14 +556,7 @@
#define nfs_wait_event(clnt, wq, condition) \
({ \
- int __retval = 0; \
- if (clnt->cl_intr) { \
- sigset_t oldmask; \
- rpc_clnt_sigmask(clnt, &oldmask); \
- __retval = wait_event_interruptible(wq, condition); \
- rpc_clnt_sigunmask(clnt, &oldmask); \
- } else \
- wait_event(wq, condition); \
+ int __retval = wait_event_killable(wq, condition); \
__retval; \
})
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index a3ade89..df7c6b7 100644
--- a/include/linux/nfs_mount.h
+++ b/include/linux/nfs_mount.h
@@ -48,7 +48,7 @@
/* bits in the flags field */
#define NFS_MOUNT_SOFT 0x0001 /* 1 */
-#define NFS_MOUNT_INTR 0x0002 /* 1 */
+#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */
#define NFS_MOUNT_SECURE 0x0004 /* 1 */
#define NFS_MOUNT_POSIX 0x0008 /* 1 */
#define NFS_MOUNT_NOCTO 0x0010 /* 1 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@
}
extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));
@@ -171,6 +172,19 @@
}
/*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals. It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+ might_sleep();
+ if (TestSetPageLocked(page))
+ return __lock_page_killable(page);
+ return 0;
+}
+
+/*
* lock_page_nosync should only be used if we can't pin the page's inode.
* Doesn't play quite so well with block device plugging.
*/
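A sketch of the intended calling pattern (the surrounding function is
hypothetical; only lock_page_killable() and unlock_page() are real):

        static int read_one_page(struct page *page)
        {
                if (lock_page_killable(page))
                        return -EINTR;  /* SIGKILL arrived while waiting */
                /* PG_locked is now held by this task */
                /* ... examine or fill the page ... */
                unlock_page(page);
                return 0;
        }

The __lock_page_killable() slowpath that the inline wrapper falls back to
when TestSetPageLocked() loses the race is added in mm/filemap.c below.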
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9d47976..6c33357 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -172,13 +172,35 @@
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
-#define TASK_STOPPED 4
-#define TASK_TRACED 8
+#define __TASK_STOPPED 4
+#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_ZOMBIE 16
#define EXIT_DEAD 32
/* in tsk->state again */
#define TASK_DEAD 64
+#define TASK_WAKEKILL 128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ __TASK_TRACED)
+
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) \
+        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task) \
+        ((task->state & TASK_UNINTERRUPTIBLE) != 0)
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
@@ -302,6 +324,7 @@
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
@@ -1892,7 +1915,14 @@
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
-
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+ return signal_pending(p) && __fatal_signal_pending(p);
+}
+
static inline int need_resched(void)
{
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
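Worked out from the definitions above, the composite states decompose as:

        TASK_KILLABLE = TASK_WAKEKILL | TASK_UNINTERRUPTIBLE  = 128 | 2 = 130
        TASK_STOPPED  = TASK_WAKEKILL | __TASK_STOPPED        = 128 | 4 = 132
        TASK_TRACED   = TASK_WAKEKILL | __TASK_TRACED         = 128 | 8 = 136
        TASK_NORMAL   = TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE    =   3
        TASK_ALL      = TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED =  15

Two consequences drive the rest of the series.  First, try_to_wake_up()
matches on (p->state & mask), so a wake with TASK_WAKEKILL in the mask
reaches killable, stopped and traced sleepers alike, which is exactly what
SIGKILL delivery needs.  Second, equality tests such as
p->state == TASK_STOPPED would now compare against the composite value 132,
which is why call sites throughout this merge are converted to the
bit-testing task_is_*() macros:

        set_task_state(tsk, TASK_STOPPED);  /* tsk->state == 132 */
        task_is_stopped(tsk);               /* true: __TASK_STOPPED bit set */
        tsk->state == __TASK_STOPPED;       /* false: 132 != 4 */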
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3e9addc..129a86e 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -41,7 +41,6 @@
struct rpc_iostats * cl_metrics; /* per-client statistics */
unsigned int cl_softrtry : 1,/* soft timeouts */
- cl_intr : 1,/* interruptible */
cl_discrtry : 1,/* disconnect before retry */
cl_autobind : 1;/* use getport() */
@@ -111,7 +110,6 @@
/* Values for "flags" field */
#define RPC_CLNT_CREATE_HARDRTRY (1UL << 0)
-#define RPC_CLNT_CREATE_INTR (1UL << 1)
#define RPC_CLNT_CREATE_AUTOBIND (1UL << 2)
#define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3)
#define RPC_CLNT_CREATE_NOPING (1UL << 4)
@@ -137,8 +135,6 @@
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
void rpc_restart_call(struct rpc_task *);
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
void rpc_force_rebind(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ce3d1b1..f689f02 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -137,7 +137,6 @@
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
-#define RPC_TASK_NOINTR 0x0400 /* uninterruptible task */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -145,7 +144,6 @@
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..1f4fb0a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@
int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
-#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL)
+
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
#define __wait_event(wq, condition) \
do { \
@@ -345,6 +346,47 @@
__ret; \
})
+#define __wait_event_killable(wq, condition, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
+ if (condition) \
+ break; \
+ if (!fatal_signal_pending(current)) { \
+ schedule(); \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __wait_event_killable(wq, condition, __ret); \
+ __ret; \
+})
+
/*
* Must be called with the spinlock in the wait_queue_head_t held.
*/
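Usage sketch for the new primitive (all names here are hypothetical):

        static DECLARE_WAIT_QUEUE_HEAD(reply_wq);
        static int reply_ready;

        static int wait_for_reply(void)
        {
                /* sleeps in TASK_KILLABLE: immune to ordinary signals */
                int err = wait_event_killable(reply_wq, reply_ready);
                if (err)
                        return err;     /* -ERESTARTSYS after SIGKILL */
                return 0;
        }

        static void post_reply(void)
        {
                reply_ready = 1;
                /* plain wake_up() reaches killable sleepers: TASK_NORMAL
                 * includes the TASK_UNINTERRUPTIBLE bit that TASK_KILLABLE
                 * carries */
                wake_up(&reply_wq);
        }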
diff --git a/kernel/exit.c b/kernel/exit.c
index 549c055..bfb1c0e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
- if (p->state != TASK_STOPPED)
+ if (!task_is_stopped(p))
continue;
retval = 1;
break;
@@ -614,7 +614,7 @@
p->parent = p->real_parent;
add_parent(p);
- if (p->state == TASK_TRACED) {
+ if (task_is_traced(p)) {
/*
* If it was at a trace stop, turn it into
* a normal stop since it's no longer being
@@ -1563,60 +1563,51 @@
}
allowed = 1;
- switch (p->state) {
- case TASK_TRACED:
- /*
- * When we hit the race with PTRACE_ATTACH,
- * we will not report this child. But the
- * race means it has not yet been moved to
- * our ptrace_children list, so we need to
- * set the flag here to avoid a spurious ECHILD
- * when the race happens with the only child.
- */
- flag = 1;
- if (!my_ptrace_child(p))
- continue;
- /*FALLTHROUGH*/
- case TASK_STOPPED:
+ if (task_is_stopped_or_traced(p)) {
/*
* It's stopped now, so it might later
* continue, exit, or stop again.
+ *
+ * When we hit the race with PTRACE_ATTACH, we
+ * will not report this child. But the race
+ * means it has not yet been moved to our
+ * ptrace_children list, so we need to set the
+ * flag here to avoid a spurious ECHILD when
+ * the race happens with the only child.
*/
flag = 1;
- if (!(options & WUNTRACED) &&
- !my_ptrace_child(p))
- continue;
+
+ if (!my_ptrace_child(p)) {
+ if (task_is_traced(p))
+ continue;
+ if (!(options & WUNTRACED))
+ continue;
+ }
+
retval = wait_task_stopped(p, ret == 2,
- (options & WNOWAIT),
- infop,
- stat_addr, ru);
+ (options & WNOWAIT), infop,
+ stat_addr, ru);
if (retval == -EAGAIN)
goto repeat;
if (retval != 0) /* He released the lock. */
goto end;
- break;
- default:
- // case EXIT_DEAD:
- if (p->exit_state == EXIT_DEAD)
+ } else if (p->exit_state == EXIT_DEAD) {
+ continue;
+ } else if (p->exit_state == EXIT_ZOMBIE) {
+ /*
+ * Eligible but we cannot release it yet:
+ */
+ if (ret == 2)
+ goto check_continued;
+ if (!likely(options & WEXITED))
continue;
- // case EXIT_ZOMBIE:
- if (p->exit_state == EXIT_ZOMBIE) {
- /*
- * Eligible but we cannot release
- * it yet:
- */
- if (ret == 2)
- goto check_continued;
- if (!likely(options & WEXITED))
- continue;
- retval = wait_task_zombie(
- p, (options & WNOWAIT),
- infop, stat_addr, ru);
- /* He released the lock. */
- if (retval != 0)
- goto end;
- break;
- }
+ retval = wait_task_zombie(p,
+ (options & WNOWAIT), infop,
+ stat_addr, ru);
+ /* He released the lock. */
+ if (retval != 0)
+ goto end;
+ } else {
check_continued:
/*
* It's running now, so it might later
@@ -1625,12 +1616,11 @@
flag = 1;
if (!unlikely(options & WCONTINUED))
continue;
- retval = wait_task_continued(
- p, (options & WNOWAIT),
- infop, stat_addr, ru);
+ retval = wait_task_continued(p,
+ (options & WNOWAIT), infop,
+ stat_addr, ru);
if (retval != 0) /* He released the lock. */
goto end;
- break;
}
}
if (!flag) {
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50c..d9ec9b6 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@
* got a signal? (This code gets eliminated in the
* TASK_UNINTERRUPTIBLE case.)
*/
- if (unlikely(state == TASK_INTERRUPTIBLE &&
- signal_pending(task))) {
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ if (unlikely((state == TASK_INTERRUPTIBLE &&
+ signal_pending(task)) ||
+ (state == TASK_KILLABLE &&
+ fatal_signal_pending(task)))) {
+ mutex_remove_waiter(lock, &waiter,
+ task_thread_info(task));
mutex_release(&lock->dep_map, 1, ip);
spin_unlock_mutex(&lock->wait_lock, flags);
@@ -211,6 +214,14 @@
EXPORT_SYMBOL_GPL(mutex_lock_nested);
int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+ might_sleep();
+ return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
@@ -272,6 +283,9 @@
* mutex_lock_interruptible() and mutex_trylock().
*/
static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
/***
@@ -294,6 +308,14 @@
EXPORT_SYMBOL(mutex_lock_interruptible);
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+ might_sleep();
+ return __mutex_fastpath_lock_retval
+ (&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
@@ -303,6 +325,14 @@
}
static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
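The killable case goes through the same __mutex_lock_common() slowpath as the
interruptible one; only the state argument and the back-out test differ.
Condensed shape of the shared wait loop (a sketch of the hunk above, with
try_acquire() standing in for the real atomic operations):

        for (;;) {
                if (try_acquire(lock))
                        break;
                if ((state == TASK_INTERRUPTIBLE && signal_pending(task)) ||
                    (state == TASK_KILLABLE && fatal_signal_pending(task))) {
                        /* drop off the wait list and fail with -EINTR */
                        return -EINTR;
                }
                __set_task_state(task, state);
                schedule();
        }

With a constant TASK_UNINTERRUPTIBLE state both halves of the test are
compile-time false and the whole check vanishes, as the comment in the hunk
notes.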
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 6533923..7c2118f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -86,9 +86,9 @@
static void send_fake_signal(struct task_struct *p)
{
- if (p->state == TASK_STOPPED)
+ if (task_is_stopped(p))
force_sig_specific(SIGSTOP, p);
- fake_signal_wake_up(p, p->state == TASK_STOPPED);
+ fake_signal_wake_up(p, task_is_stopped(p));
}
static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@
if (frozen(p) || !freezeable(p))
continue;
- if (p->state == TASK_TRACED && frozen(p->parent)) {
+ if (task_is_traced(p) && frozen(p->parent)) {
cancel_freezing(p);
continue;
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e6e9b8be..b0d4ab4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -51,7 +51,7 @@
void ptrace_untrace(struct task_struct *child)
{
spin_lock(&child->sighand->siglock);
- if (child->state == TASK_TRACED) {
+ if (task_is_traced(child)) {
if (child->signal->flags & SIGNAL_STOP_STOPPED) {
child->state = TASK_STOPPED;
} else {
@@ -79,7 +79,7 @@
add_parent(child);
}
- if (child->state == TASK_TRACED)
+ if (task_is_traced(child))
ptrace_untrace(child);
}
@@ -103,9 +103,9 @@
&& child->signal != NULL) {
ret = 0;
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_STOPPED) {
+ if (task_is_stopped(child)) {
child->state = TASK_TRACED;
- } else if (child->state != TASK_TRACED && !kill) {
+ } else if (!task_is_traced(child) && !kill) {
ret = -ESRCH;
}
spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 8355e00..9474b23 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1350,7 +1350,7 @@
*/
static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
{
- if (p->state == TASK_UNINTERRUPTIBLE)
+ if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@
*/
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
{
- if (p->state == TASK_UNINTERRUPTIBLE)
+ if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@
int fastcall wake_up_process(struct task_struct *p)
{
- return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
- TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+ return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);
@@ -4124,8 +4123,7 @@
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
- __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
- 1, 0, NULL);
+ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@
spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
- __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
- 0, 0, NULL);
+ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@
wait.flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(&x->wait, &wait);
do {
- if (state == TASK_INTERRUPTIBLE &&
- signal_pending(current)) {
+ if ((state == TASK_INTERRUPTIBLE &&
+ signal_pending(current)) ||
+ (state == TASK_KILLABLE &&
+ fatal_signal_pending(current))) {
__remove_wait_queue(&x->wait, &wait);
return -ERESTARTSYS;
}
@@ -4212,6 +4211,15 @@
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+int __sched wait_for_completion_killable(struct completion *x)
+{
+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+ if (t == -ERESTARTSYS)
+ return t;
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
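wait_for_completion_killable() returns 0 on completion and -ERESTARTSYS if
the sleeper is killed first (unlike the _timeout variants, which return
remaining jiffies).  An illustrative caller, mirroring the nfs_direct_wait()
conversion earlier in this merge (the device names are made up):

        static int wait_for_init(struct my_dev *dev)
        {
                int err = wait_for_completion_killable(&dev->init_done);
                if (err)
                        return err;     /* task received SIGKILL */
                return dev->status;
        }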
diff --git a/kernel/signal.c b/kernel/signal.c
index bf49ce6..8054dd4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,15 +456,15 @@
set_tsk_thread_flag(t, TIF_SIGPENDING);
/*
- * For SIGKILL, we want to wake it up in the stopped/traced case.
- * We don't check t->state here because there is a race with it
+ * For SIGKILL, we want to wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
* executing another processor and just now entering stopped state.
* By using wake_up_state, we ensure the process will wake up and
* handle its death signal.
*/
mask = TASK_INTERRUPTIBLE;
if (resume)
- mask |= TASK_STOPPED | TASK_TRACED;
+ mask |= TASK_WAKEKILL;
if (!wake_up_state(t, mask))
kick_process(t);
}
@@ -620,7 +620,7 @@
* Wake up the stopped thread _after_ setting
* TIF_SIGPENDING
*/
- state = TASK_STOPPED;
+ state = __TASK_STOPPED;
if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
state |= TASK_INTERRUPTIBLE;
@@ -838,7 +838,7 @@
return 0;
if (sig == SIGKILL)
return 1;
- if (p->state & (TASK_STOPPED | TASK_TRACED))
+ if (task_is_stopped_or_traced(p))
return 0;
return task_curr(p) || !signal_pending(p);
}
@@ -994,6 +994,11 @@
}
}
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+ return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
@@ -1441,7 +1446,7 @@
BUG_ON(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
- BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ BUG_ON(task_is_stopped_or_traced(tsk));
BUG_ON(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1734,7 @@
* so this check has no races.
*/
if (!t->exit_state &&
- !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+ !task_is_stopped_or_traced(t)) {
stop_count++;
signal_wake_up(t, 0);
}
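Putting the signal.c pieces together, the path that unsticks a killable
sleeper is roughly (a condensed walk-through of existing code, not a new
hunk):

        /* sender, for a fatal signal: the per-thread pending set gets
         * SIGKILL, which is precisely what __fatal_signal_pending()
         * above tests */
        sigaddset(&t->pending.signal, SIGKILL);
        signal_wake_up(t, 1);           /* resume == 1 */
                /* mask = TASK_INTERRUPTIBLE | TASK_WAKEKILL, so
                 * wake_up_state() matches TASK_KILLABLE, TASK_STOPPED
                 * and TASK_TRACED, all of which carry TASK_WAKEKILL */

        /* sleeper, after waking: fatal_signal_pending(current) is true,
         * and the killable primitive backs out with -ERESTARTSYS or
         * -EINTR, per its convention */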
diff --git a/kernel/timer.c b/kernel/timer.c
index 23f7ead..9fbb472 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1099,6 +1099,13 @@
}
EXPORT_SYMBOL(schedule_timeout_interruptible);
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+ __set_current_state(TASK_KILLABLE);
+ return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
__set_current_state(TASK_UNINTERRUPTIBLE);
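schedule_timeout_killable() keeps the schedule_timeout_*() semantics: it
returns the jiffies remaining (0 after an undisturbed sleep), and for
TASK_KILLABLE an early return normally means a fatal signal arrived.  The
generic retry shape, of which the nfs3_rpc_wrapper() and nfs4_delay()
conversions above are instances (issue_request() and retry_interval are
placeholders):

        do {
                res = issue_request();
                if (res != -EAGAIN)
                        break;
                schedule_timeout_killable(retry_interval);
                res = -ERESTARTSYS;
        } while (!fatal_signal_pending(current));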
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@
{
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
if (waitqueue_active(wq))
- __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+ __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
diff --git a/mm/filemap.c b/mm/filemap.c
index f4d0cde..89ce6fe 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -185,6 +185,12 @@
return 0;
}
+static int sync_page_killable(void *word)
+{
+ sync_page(word);
+ return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
/**
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
@@ -589,6 +595,14 @@
}
EXPORT_SYMBOL(__lock_page);
+int fastcall __lock_page_killable(struct page *page)
+{
+ DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+ return __wait_on_bit_lock(page_waitqueue(page), &wait,
+ sync_page_killable, TASK_KILLABLE);
+}
+
/*
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
@@ -980,7 +994,8 @@
page_not_up_to_date:
/* Get exclusive access to the page ... */
- lock_page(page);
+ if (lock_page_killable(page))
+ goto readpage_eio;
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -1008,7 +1023,8 @@
}
if (!PageUptodate(page)) {
- lock_page(page);
+ if (lock_page_killable(page))
+ goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1019,15 +1035,16 @@
goto find_page;
}
unlock_page(page);
- error = -EIO;
shrink_readahead_size_eio(filp, ra);
- goto readpage_error;
+ goto readpage_eio;
}
unlock_page(page);
}
goto page_ok;
+readpage_eio:
+ error = -EIO;
readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
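Two notes on the filemap.c conversion.  First, the bit-wait action contract:
the action is called in place of a bare schedule() while the bit stays set;
returning 0 keeps waiting, and a nonzero return aborts the wait and is
propagated out of __wait_on_bit_lock(), which is how the -EINTR from
sync_page_killable() becomes __lock_page_killable()'s return value.
Schematically (hypothetical action):

        static int my_wait_action(void *word)
        {
                if (fatal_signal_pending(current))
                        return -EINTR;  /* abort the bit-wait */
                io_schedule();
                return 0;               /* keep waiting */
        }

Second, when lock_page_killable() fails in the read path, the new
readpage_eio label makes do_generic_mapping_read() report -EIO rather than
the signal error; since the task already has SIGKILL pending, the exact
error code is academic.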
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index bcd9abd..eca941c 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -385,7 +385,6 @@
.group_info = current->group_info,
};
struct rpc_cred *ret;
- sigset_t oldset;
int flags = 0;
dprintk("RPC: %5u looking up %s cred\n",
@@ -393,9 +392,7 @@
get_group_info(acred.group_info);
if (task->tk_flags & RPC_TASK_ROOTCREDS)
flags |= RPCAUTH_LOOKUP_ROOTCREDS;
- rpc_clnt_sigmask(task->tk_client, &oldset);
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
- rpc_clnt_sigunmask(task->tk_client, &oldset);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 924916c..0998e6d 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -313,7 +313,7 @@
return clnt;
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
- int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+ int err = rpc_ping(clnt, RPC_TASK_SOFT);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
@@ -324,8 +324,6 @@
if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
clnt->cl_softrtry = 0;
- if (args->flags & RPC_CLNT_CREATE_INTR)
- clnt->cl_intr = 1;
if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
clnt->cl_autobind = 1;
if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
@@ -493,7 +491,7 @@
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
- err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+ err = rpc_ping(clnt, RPC_TASK_SOFT);
if (err != 0) {
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
@@ -515,46 +513,6 @@
.rpc_call_done = rpc_default_callback,
};
-/*
- * Export the signal mask handling for synchronous code that
- * sleeps on RPC calls
- */
-#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-
-static void rpc_save_sigmask(sigset_t *oldset, int intr)
-{
- unsigned long sigallow = sigmask(SIGKILL);
- sigset_t sigmask;
-
- /* Block all signals except those listed in sigallow */
- if (intr)
- sigallow |= RPC_INTR_SIGNALS;
- siginitsetinv(&sigmask, sigallow);
- sigprocmask(SIG_BLOCK, &sigmask, oldset);
-}
-
-static void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
-{
- rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
-}
-
-static void rpc_restore_sigmask(sigset_t *oldset)
-{
- sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
- rpc_save_sigmask(oldset, clnt->cl_intr);
-}
-EXPORT_SYMBOL_GPL(rpc_clnt_sigmask);
-
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
- rpc_restore_sigmask(oldset);
-}
-EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
-
/**
* rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
* @task_setup_data: pointer to task initialisation data
@@ -562,7 +520,6 @@
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
struct rpc_task *task, *ret;
- sigset_t oldset;
task = rpc_new_task(task_setup_data);
if (task == NULL) {
@@ -578,13 +535,7 @@
goto out;
}
atomic_inc(&task->tk_count);
- /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
- if (!RPC_IS_ASYNC(task)) {
- rpc_task_sigmask(task, &oldset);
- rpc_execute(task);
- rpc_restore_sigmask(&oldset);
- } else
- rpc_execute(task);
+ rpc_execute(task);
ret = task;
out:
return ret;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa5b8f2..3164a08 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -120,8 +120,7 @@
.program = &rpcb_program,
.version = version,
.authflavor = RPC_AUTH_UNIX,
- .flags = (RPC_CLNT_CREATE_NOPING |
- RPC_CLNT_CREATE_INTR),
+ .flags = RPC_CLNT_CREATE_NOPING,
};
switch (srvaddr->sa_family) {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 40ce6f6..4c66912 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -245,9 +245,9 @@
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
-static int rpc_wait_bit_interruptible(void *word)
+static int rpc_wait_bit_killable(void *word)
{
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
@@ -299,9 +299,9 @@
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
if (action == NULL)
- action = rpc_wait_bit_interruptible;
+ action = rpc_wait_bit_killable;
return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
- action, TASK_INTERRUPTIBLE);
+ action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -696,10 +696,9 @@
/* sync task: sleep here */
dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
- /* Note: Caller should be using rpc_clnt_sigmask() */
status = out_of_line_wait_on_bit(&task->tk_runstate,
- RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
- TASK_INTERRUPTIBLE);
+ RPC_TASK_QUEUED, rpc_wait_bit_killable,
+ TASK_KILLABLE);
if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
@@ -840,8 +839,6 @@
kref_get(&task->tk_client->cl_kref);
if (task->tk_client->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
- if (!task->tk_client->cl_intr)
- task->tk_flags |= RPC_TASK_NOINTR;
}
if (task->tk_ops->rpc_call_prepare != NULL)