/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state. Can be
 * called in any context. The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
	if (p->flags & PF_NOFREEZE)
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);

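/*
 * Example (illustrative sketch, not part of this file): the freezing()
 * fast path in <linux/freezer.h> is expected to test system_freezing_cnt
 * first and fall back to freezing_slow_path() only when some freezing
 * condition is in effect, roughly like:
 *
 *	static inline bool freezing(struct task_struct *p)
 *	{
 *		if (likely(!atomic_read(&system_freezing_cnt)))
 *			return false;
 *		return freezing_slow_path(p);
 *	}
 */
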
/* Refrigerator is the place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	bool was_frozen = false;
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/*
	 * Restore saved task state before returning. The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);

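/*
 * Example (illustrative sketch, not part of this file): callers normally
 * reach __refrigerator() through try_to_freeze() in <linux/freezer.h>,
 * which is expected to look roughly like:
 *
 *	static inline bool try_to_freeze(void)
 *	{
 *		if (likely(!freezing(current)))
 *			return false;
 *		return __refrigerator(false);
 *	}
 *
 * Freezable kthreads that must remain stoppable while frozen are expected
 * to enter with check_kthr_stop=true instead, e.g. via
 * kthread_freezable_should_stop().
 */
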
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}

/**
 * freeze_task - send a freeze request to a given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}

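/*
 * Example (illustrative sketch, not part of this file): a system-wide
 * freezer such as try_to_freeze_tasks() in kernel/power/process.c is
 * expected to walk the task list and poke each task with freeze_task()
 * until no call returns %true, roughly like:
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, p) {
 *		if (freeze_task(p))
 *			todo++;
 *	} while_each_thread(g, p);
 *	read_unlock(&tasklist_lock);
 *
 * where @todo counts tasks that still have to reach the refrigerator.
 */
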
void __thaw_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
	 * be visible to @p as waking up implies wmb. Waking up inside
	 * freezer_lock also prevents wakeups from leaking outside
	 * refrigerator.
	 */
	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}

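/*
 * Example (illustrative sketch, not part of this file): once the global
 * freezing conditions have been cleared, thaw_processes() in
 * kernel/power/process.c is expected to walk the task list and wake every
 * remaining frozen task, roughly like:
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, p) {
 *		__thaw_task(p);
 *	} while_each_thread(g, p);
 *	read_unlock(&tasklist_lock);
 */
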
/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock. This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
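
/*
 * Example (illustrative sketch, not part of this file): a kernel thread
 * that wants to participate in system freezing typically calls
 * set_freezable() once at startup and then polls try_to_freeze() (or uses
 * the freezable wait helpers) from its main loop. The thread function and
 * do_work() below are hypothetical:
 *
 *	static int my_freezable_thread(void *unused)
 *	{
 *		set_freezable();
 *
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			do_work();
 *		}
 *		return 0;
 *	}
 */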