/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
static int			rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static kmem_cache_t	*rpc_task_slabp __read_mostly;
static kmem_cache_t	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void		__rpc_default_timer(struct rpc_task *task);
static void		rpciod_killall(void);
static void		rpc_free(struct rpc_task *task);

static void		rpc_async_schedule(void *);

/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion
 */
static RPC_WAITQ(childq, "childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DECLARE_MUTEX(rpciod_sema);
static unsigned int		rpciod_users;
static struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);

/*
 * Disable the timer for a given RPC task. Should be called with the
 * queue->lock held and bottom halves disabled, in order to avoid races
 * within rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
	task->tk_timeout_fn = NULL;
	task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
	void (*callback)(struct rpc_task *);

	callback = task->tk_timeout_fn;
	task->tk_timeout_fn = NULL;
	if (callback && RPC_IS_QUEUED(task)) {
		dprintk("RPC: %4d running timer\n", task->tk_pid);
		callback(task);
	}
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	smp_mb__after_clear_bit();
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	if (timer)
		task->tk_timeout_fn = timer;
	else
		task->tk_timeout_fn = __rpc_default_timer;
	set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task))
		return;
	if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
		del_singleshot_timer_sync(&task->tk_timer);
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
	}
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_cookie == task->tk_cookie) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON(RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->u.tk_wait.rpc_waitq = queue;
	rpc_set_queued(task);

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	queue = task->u.tk_wait.rpc_waitq;

	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
	queue->cookie = cookie;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = maxprio;
	rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);
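
/*
 * Usage sketch (illustrative only; the queue variable and name below are
 * hypothetical, not defined in this file). A caller embeds an
 * rpc_wait_queue in its own structure and initializes it once before use:
 *
 *	static struct rpc_wait_queue demo_queue;
 *
 *	rpc_init_wait_queue(&demo_queue, "demoq");
 *
 * Use rpc_init_priority_wait_queue() instead when tasks with different
 * tk_priority levels must be serviced from the same queue.
 */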

static int rpc_wait_bit_interruptible(void *word)
{
	if (signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static inline void rpc_mark_complete_task(struct rpc_task *task)
{
	rpc_clear_active(task);
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_interruptible;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
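
/*
 * Caller-side sketch (illustrative only): block until the task clears
 * RPC_TASK_ACTIVE. Passing a NULL action selects the default interruptible
 * wait above, so a pending signal aborts the wait:
 *
 *	int status = __rpc_wait_for_completion_task(task, NULL);
 *	if (status < 0)
 *		return status;	(interrupted by a signal: -ERESTARTSYS)
 */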

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	int do_ret;

	BUG_ON(task->tk_timeout_fn);
	do_ret = rpc_test_and_set_running(task);
	rpc_clear_queued(task);
	if (do_ret)
		return;
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
		status = queue_work(task->tk_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Place a newly initialized task on the workqueue.
 */
static inline void
rpc_schedule_run(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_make_runnable(task);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action, timer);
	spin_unlock_bh(&q->lock);
}
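
/*
 * Usage sketch (illustrative only; "demo_queue" and "demo_retry" are
 * hypothetical). A state-machine step parks the task on a queue, names the
 * callback to run as tk_callback on wakeup, and returns control to
 * __rpc_execute() immediately afterwards. A non-zero tk_timeout with a
 * NULL timer argument arms __rpc_default_timer():
 *
 *	task->tk_timeout = 5 * HZ;
 *	rpc_sleep_on(&demo_queue, task, demo_retry, NULL);
 *	return;	(the scheduler resumes the task via demo_retry())
 */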

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_disable_timer(task);
	__rpc_remove_wait_queue(task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task))
			__rpc_do_wake_up_task(task);
		rpc_finish_wakeup(task);
	}
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task)) {
			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

			spin_lock_bh(&queue->lock);
			__rpc_do_wake_up_task(task);
			spin_unlock_bh(&queue->lock);
		}
		rpc_finish_wakeup(task);
	}
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single cookie.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->cookie == task->tk_cookie) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_cookie;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
	__rpc_wake_up_task(task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			__rpc_wake_up_task(task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;
	struct rpc_task *task;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}

/*
 * Run a task at a later time
 */
static void	__rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}
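
/*
 * Usage sketch (illustrative only; "demo_retry" is hypothetical): back off
 * for five seconds before the next state-machine step. __rpc_atrun() clears
 * tk_status and wakes the task when the delay expires:
 *
 *	task->tk_action = demo_retry;
 *	rpc_delay(task, 5 * HZ);
 *	return;
 */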

static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	rpc_wake_up_task(task);
}

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_free(task);
		}
	}
}
EXPORT_SYMBOL(rpc_exit_task);

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int __rpc_execute(struct rpc_task *task)
{
	int status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {
		/*
		 * Garbage collection of pending timers...
		 */
		rpc_delete_timer(task);

		/*
		 * Execute any pending callback.
		 */
		if (RPC_DO_CALLBACK(task)) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 * - Dave
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			lock_kernel();
			save_callback(task);
			unlock_kernel();
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			lock_kernel();
			task->tk_action(task);
			unlock_kernel();
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		rpc_clear_running(task);
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
				return 0;
			if (rpc_test_and_set_running(task))
				return 0;
			continue;
		}

		/* sync task: sleep here */
		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
		/* Note: Caller should be using rpc_clnt_sigmask() */
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
				TASK_INTERRUPTIBLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %4d got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);
	/* Release all resources associated with the task */
	rpc_release_task(task);
	return status;
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	return __rpc_execute(task);
}

static void rpc_async_schedule(void *arg)
{
	__rpc_execute((struct rpc_task *)arg);
}

/*
 * Allocate memory for RPC purposes.
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void *
rpc_malloc(struct rpc_task *task, size_t size)
{
	gfp_t	gfp;

	if (task->tk_flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else
		gfp = GFP_NOFS;

	if (size > RPC_BUFFER_MAXSIZE) {
		task->tk_buffer = kmalloc(size, gfp);
		if (task->tk_buffer)
			task->tk_bufsize = size;
	} else {
		task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
		if (task->tk_buffer)
			task->tk_bufsize = RPC_BUFFER_MAXSIZE;
	}
	return task->tk_buffer;
}
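
/*
 * Usage sketch (illustrative only; "bufsiz" is a hypothetical size, not a
 * variable defined here). A caller sizes one buffer for both call and
 * reply, and relies on rpc_free() to return it later:
 *
 *	if (rpc_malloc(task, bufsiz << 1) == NULL)
 *		return -ENOMEM;	(room for call + reply)
 */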

static void
rpc_free(struct rpc_task *task)
{
	if (task->tk_buffer) {
		if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
			mempool_free(task->tk_buffer, rpc_buffer_mempool);
		else
			kfree(task->tk_buffer);
		task->tk_buffer = NULL;
		task->tk_bufsize = 0;
	}
}

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	atomic_set(&task->tk_count, 1);
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_ops = tk_ops;
	if (tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;
	task->tk_calldata = calldata;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = (unsigned long)current;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = rpciod_workqueue;

	if (clnt) {
		atomic_inc(&clnt->cl_users);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (!clnt->cl_intr)
			task->tk_flags |= RPC_TASK_NOINTR;
	}

#ifdef RPC_DEBUG
	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = rpc_task_id++;
#endif
	/* Add to global list of all tasks */
	spin_lock(&rpc_sched_lock);
	list_add_tail(&task->tk_task, &all_tasks);
	spin_unlock(&rpc_sched_lock);

	BUG_ON(task->tk_ops == NULL);

	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rpc_task *task)
{
	dprintk("RPC: %4d freeing task\n", task->tk_pid);
	mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client. We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
	struct rpc_task	*task;

	task = rpc_alloc_task();
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, flags, tk_ops, calldata);

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}

void rpc_release_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	if (!atomic_dec_and_test(&task->tk_count))
		return;
	dprintk("RPC: %4d release task\n", task->tk_pid);

	/* Remove from global task list */
	spin_lock(&rpc_sched_lock);
	list_del(&task->tk_task);
	spin_unlock(&rpc_sched_lock);

	BUG_ON(RPC_IS_QUEUED(task));

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	rpc_free(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	if (task->tk_flags & RPC_TASK_DYNAMIC)
		rpc_free_task(task);
	if (tk_ops->rpc_release)
		tk_ops->rpc_release(calldata);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
					const struct rpc_call_ops *ops,
					void *data)
{
	struct rpc_task *task;

	task = rpc_new_task(clnt, flags, ops, data);
	if (task == NULL)
		return ERR_PTR(-ENOMEM);
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL(rpc_run_task);
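
/*
 * Caller-side sketch (illustrative only; "demo_ops" is a hypothetical
 * rpc_call_ops instance). rpc_run_task() takes an extra tk_count reference
 * before executing, so the caller owns the returned task and must drop
 * that reference with rpc_release_task() when done:
 *
 *	struct rpc_task *task;
 *
 *	task = rpc_run_task(clnt, RPC_TASK_ASYNC, &demo_ops, calldata);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	rpc_release_task(task);	(drop the caller's reference)
 */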

/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 * @parent: suspected parent of @child
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so, returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold childq.lock
 */
static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
{
	struct rpc_task	*task;
	struct list_head *le;

	task_for_each(task, le, &childq.tasks[0])
		if (task == parent)
			return parent;

	return NULL;
}

static void rpc_child_exit(struct rpc_task *child, void *calldata)
{
	struct rpc_task *parent;

	spin_lock_bh(&childq.lock);
	if ((parent = rpc_find_parent(child, calldata)) != NULL) {
		parent->tk_status = child->tk_status;
		__rpc_wake_up_task(parent);
	}
	spin_unlock_bh(&childq.lock);
}

static const struct rpc_call_ops rpc_child_ops = {
	.rpc_call_done = rpc_child_exit,
};

/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
	if (!task)
		goto fail;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}

void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	spin_lock_bh(&childq.lock);
	/* N.B. Is it possible for the child to have already finished? */
	__rpc_sleep_on(&childq, task, func, NULL);
	rpc_schedule_run(child);
	spin_unlock_bh(&childq.lock);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;
	struct list_head *le;

	dprintk("RPC: killing all tasks for client %p\n", clnt);

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&rpc_sched_lock);
	alltask_for_each(rovr, le, &all_tasks) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static void rpciod_killall(void)
{
	unsigned long flags;

	while (!list_empty(&all_tasks)) {
		clear_thread_flag(TIF_SIGPENDING);
		rpc_killall_tasks(NULL);
		flush_workqueue(rpciod_workqueue);
		if (!list_empty(&all_tasks)) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			yield();
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	struct workqueue_struct *wq;
	int error = 0;

	down(&rpciod_sema);
	dprintk("rpciod_up: users %d\n", rpciod_users);
	rpciod_users++;
	if (rpciod_workqueue)
		goto out;
	/*
	 * If there's no workqueue yet, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod workqueue and wait for it to start.
	 */
	error = -ENOMEM;
	wq = create_workqueue("rpciod");
	if (wq == NULL) {
		printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	rpciod_workqueue = wq;
	error = 0;
out:
	up(&rpciod_sema);
	return error;
}

void
rpciod_down(void)
{
	down(&rpciod_sema);
	dprintk("rpciod_down sema %d\n", rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: no users??\n");

	if (!rpciod_workqueue) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}
	rpciod_killall();

	destroy_workqueue(rpciod_workqueue);
	rpciod_workqueue = NULL;
out:
	up(&rpciod_sema);
}
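
/*
 * Usage sketch (illustrative only): rpciod users are reference counted, so
 * every successful rpciod_up() must eventually be paired with a matching
 * rpciod_down(); the last user tears down the workqueue:
 *
 *	error = rpciod_up();
 *	if (error < 0)
 *		return error;
 *	...
 *	rpciod_down();
 */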

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
	struct list_head *le;
	struct rpc_task *t;

	spin_lock(&rpc_sched_lock);
	if (list_empty(&all_tasks)) {
		spin_unlock(&rpc_sched_lock);
		return;
	}
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- ---ops--\n");
	alltask_for_each(t, le, &all_tasks) {
		const char *rpc_waitq = "none";

		if (RPC_IS_QUEUED(t))
			rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid,
			(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
			t->tk_flags, t->tk_status,
			t->tk_client,
			(t->tk_client ? t->tk_client->cl_prog : 0),
			t->tk_rqstp, t->tk_timeout,
			rpc_waitq,
			t->tk_action, t->tk_ops);
	}
	spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
		printk(KERN_INFO "rpc_task: not all structures were freed\n");
	if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
		printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}

int
rpc_init_mempool(void)
{
	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					sizeof(struct rpc_task),
					0, SLAB_HWCACHE_ALIGN,
					NULL, NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					RPC_BUFFER_MAXSIZE,
					0, SLAB_HWCACHE_ALIGN,
					NULL, NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
					mempool_alloc_slab,
					mempool_free_slab,
					rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
					mempool_alloc_slab,
					mempool_free_slab,
					rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}
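
/*
 * Init-time sketch (illustrative only; an assumption about the caller, not
 * part of this file): the sunrpc module's init and exit paths pair these
 * calls, tearing the pools down only after rpciod has stopped:
 *
 *	if (rpc_init_mempool() < 0)
 *		return -ENOMEM;
 *	...
 *	rpc_destroy_mempool();	(on module unload, after rpciod_down())
 */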