blob: 91d4e1742a0c4ec1cd8805b6663f5a95b5d42b3b [file] [log] [blame]
Oleg Nesterove73f8952012-05-11 10:59:07 +10001#include <linux/spinlock.h>
2#include <linux/task_work.h>
3#include <linux/tracehook.h>
4
5int
Al Viro67d12142012-06-27 11:07:19 +04006task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
Oleg Nesterove73f8952012-05-11 10:59:07 +10007{
Al Viroed3e6942012-06-27 11:31:24 +04008 struct callback_head *last, *first;
Oleg Nesterove73f8952012-05-11 10:59:07 +10009 unsigned long flags;
Oleg Nesterove73f8952012-05-11 10:59:07 +100010
Oleg Nesterove73f8952012-05-11 10:59:07 +100011 /*
Al Viroed3e6942012-06-27 11:31:24 +040012 * Not inserting the new work if the task has already passed
13 * exit_task_work() is the responisbility of callers.
Oleg Nesterove73f8952012-05-11 10:59:07 +100014 */
15 raw_spin_lock_irqsave(&task->pi_lock, flags);
Al Viroed3e6942012-06-27 11:31:24 +040016 last = task->task_works;
17 first = last ? last->next : twork;
18 twork->next = first;
19 if (last)
20 last->next = twork;
21 task->task_works = twork;
Oleg Nesterove73f8952012-05-11 10:59:07 +100022 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
23
24 /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
Al Viroed3e6942012-06-27 11:31:24 +040025 if (notify)
Oleg Nesterove73f8952012-05-11 10:59:07 +100026 set_notify_resume(task);
Al Viroed3e6942012-06-27 11:31:24 +040027 return 0;
Oleg Nesterove73f8952012-05-11 10:59:07 +100028}
29
Al Viro67d12142012-06-27 11:07:19 +040030struct callback_head *
Oleg Nesterove73f8952012-05-11 10:59:07 +100031task_work_cancel(struct task_struct *task, task_work_func_t func)
32{
33 unsigned long flags;
Al Viro67d12142012-06-27 11:07:19 +040034 struct callback_head *last, *res = NULL;
Oleg Nesterove73f8952012-05-11 10:59:07 +100035
36 raw_spin_lock_irqsave(&task->pi_lock, flags);
Al Viro158e1642012-06-27 09:24:13 +040037 last = task->task_works;
38 if (last) {
Al Viro67d12142012-06-27 11:07:19 +040039 struct callback_head *q = last, *p = q->next;
Al Viro158e1642012-06-27 09:24:13 +040040 while (1) {
41 if (p->func == func) {
42 q->next = p->next;
43 if (p == last)
44 task->task_works = q == p ? NULL : q;
45 res = p;
46 break;
47 }
48 if (p == last)
49 break;
50 q = p;
51 p = q->next;
Oleg Nesterove73f8952012-05-11 10:59:07 +100052 }
53 }
Oleg Nesterove73f8952012-05-11 10:59:07 +100054 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
Al Viro158e1642012-06-27 09:24:13 +040055 return res;
Oleg Nesterove73f8952012-05-11 10:59:07 +100056}
57
/*
 * task_work_run - run all works queued on the current task
 *
 * Repeatedly detaches the entire list under ->pi_lock and invokes the
 * callbacks in queueing (FIFO) order until the list is observed empty.
 * Callbacks run with the lock dropped and may queue further work,
 * which is why the outer loop re-checks.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *p, *q;

	while (1) {
		/* atomically steal the whole circular list */
		raw_spin_lock_irq(&task->pi_lock);
		p = task->task_works;
		task->task_works = NULL;
		raw_spin_unlock_irq(&task->pi_lock);

		if (unlikely(!p))
			return;

		/*
		 * p is the tail; p->next is the head.  Break the circle
		 * so the inner loop can walk a NULL-terminated list.
		 */
		q = p->next; /* head */
		p->next = NULL; /* cut it */
		while (q) {
			/* fetch ->next first: the callback may free q */
			p = q->next;
			q->func(q);
			q = p;
		}
	}
}