blob: e70df40d84f6fe83c72f732aa44b454148661b6e [file] [log] [blame]
Nigel Cunningham7dfb7102006-12-06 20:34:23 -08001/* Freezer declarations */
2
Rafael J. Wysocki83144182007-07-17 04:03:35 -07003#ifndef FREEZER_H_INCLUDED
4#define FREEZER_H_INCLUDED
5
Randy Dunlap5c543ef2006-12-10 02:18:58 -08006#include <linux/sched.h>
Rafael J. Wysockie42837b2007-10-18 03:04:45 -07007#include <linux/wait.h>
Tejun Heoa3201222011-11-21 12:32:25 -08008#include <linux/atomic.h>
Randy Dunlap5c543ef2006-12-10 02:18:58 -08009
Matt Helsley8174f152008-10-18 20:27:19 -070010#ifdef CONFIG_FREEZER
Tejun Heoa3201222011-11-21 12:32:25 -080011extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
12extern bool pm_freezing; /* PM freezing in effect */
13extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
14
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080015/*
Li Fei957d1282013-02-01 08:56:03 +000016 * Timeout for stopping processes
17 */
18extern unsigned int freeze_timeout_msecs;
19
20/*
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080021 * Check if a process has been frozen
22 */
Tejun Heo948246f2011-11-21 12:32:25 -080023static inline bool frozen(struct task_struct *p)
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080024{
25 return p->flags & PF_FROZEN;
26}
27
Tejun Heoa3201222011-11-21 12:32:25 -080028extern bool freezing_slow_path(struct task_struct *p);
29
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080030/*
31 * Check if there is a request to freeze a process
32 */
Tejun Heoa3201222011-11-21 12:32:25 -080033static inline bool freezing(struct task_struct *p)
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080034{
Tejun Heoa3201222011-11-21 12:32:25 -080035 if (likely(!atomic_read(&system_freezing_cnt)))
36 return false;
37 return freezing_slow_path(p);
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080038}
39
Matt Helsleydc52ddc2008-10-18 20:27:21 -070040/* Takes and releases task alloc lock using task_lock() */
Tejun Heoa5be2d02011-11-21 12:32:23 -080041extern void __thaw_task(struct task_struct *t);
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080042
Tejun Heo8a32c442011-11-21 12:32:23 -080043extern bool __refrigerator(bool check_kthr_stop);
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080044extern int freeze_processes(void);
Rafael J. Wysocki2aede852011-09-26 20:32:27 +020045extern int freeze_kernel_threads(void);
Rafael J. Wysockia9b6f562006-12-06 20:34:37 -080046extern void thaw_processes(void);
Rafael J. Wysocki181e9bd2012-01-29 20:35:52 +010047extern void thaw_kernel_threads(void);
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080048
Tejun Heoa0acae02011-11-21 12:32:22 -080049static inline bool try_to_freeze(void)
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080050{
Tejun Heoa0acae02011-11-21 12:32:22 -080051 might_sleep();
52 if (likely(!freezing(current)))
53 return false;
Tejun Heo8a32c442011-11-21 12:32:23 -080054 return __refrigerator(false);
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080055}
Nigel Cunninghamff395932006-12-06 20:34:28 -080056
Tejun Heo839e3402011-11-21 12:32:26 -080057extern bool freeze_task(struct task_struct *p);
Tejun Heo34b087e2011-11-23 09:28:17 -080058extern bool set_freezable(void);
Matt Helsley8174f152008-10-18 20:27:19 -070059
#ifdef CONFIG_CGROUP_FREEZER
/* True if @task belongs to a cgroup that is freezing or frozen. */
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
/* Without the cgroup freezer, no task is ever cgroup-frozen. */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
68
Rafael J. Wysockiba96a0c2007-05-23 13:57:25 -070069/*
70 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
71 * calls wait_for_completion(&vfork) and reset right after it returns from this
72 * function. Next, the parent should call try_to_freeze() to freeze itself
73 * appropriately in case the child has exited before the freezing of tasks is
74 * complete. However, we don't want kernel threads to be frozen in unexpected
75 * places, so we allow them to block freeze_processes() instead or to set
Srivatsa S. Bhat467de1f2011-12-06 23:17:51 +010076 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
77 * parent won't really block freeze_processes(), since ____call_usermodehelper()
78 * (the child) does a little before exec/exit and it can't be frozen before
79 * waking up the parent.
Rafael J. Wysockiba96a0c2007-05-23 13:57:25 -070080 */
81
Srivatsa S. Bhat467de1f2011-12-06 23:17:51 +010082
Tejun Heodd67d322012-10-16 15:03:14 -070083/**
84 * freezer_do_not_count - tell freezer to ignore %current
85 *
86 * Tell freezers to ignore the current task when determining whether the
87 * target frozen state is reached. IOW, the current task will be
88 * considered frozen enough by freezers.
89 *
90 * The caller shouldn't do anything which isn't allowed for a frozen task
91 * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
92 * wrap a scheduling operation and nothing much else.
93 */
static inline void freezer_do_not_count(void)
{
	/* Mark %current so freezers consider it frozen enough already. */
	current->flags |= PF_FREEZER_SKIP;
}
98
Tejun Heodd67d322012-10-16 15:03:14 -070099/**
100 * freezer_count - tell freezer to stop ignoring %current
101 *
102 * Undo freezer_do_not_count(). It tells freezers that %current should be
103 * considered again and tries to freeze if freezing condition is already in
104 * effect.
Rafael J. Wysockiba96a0c2007-05-23 13:57:25 -0700105 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	/* Freeze right away if a freezing condition is already pending. */
	try_to_freeze();
}
117
Tejun Heodd67d322012-10-16 15:03:14 -0700118/**
119 * freezer_should_skip - whether to skip a task when determining frozen
120 * state is reached
 * @p: task in question
122 *
123 * This function is used by freezers after establishing %true freezing() to
124 * test whether a task should be skipped when determining the target frozen
125 * state is reached. IOW, if this function returns %true, @p is considered
126 * frozen enough.
Rafael J. Wysockiba96a0c2007-05-23 13:57:25 -0700127 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false. This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	/* Only meaningful after the caller has observed freezing(p) == true. */
	return p->flags & PF_FREEZER_SKIP;
}
Nigel Cunninghamff395932006-12-06 20:34:28 -0800140
/*
 * These macros are intended to be used whenever you want to allow a sleeping
 * task to be frozen. Note that neither returns any clear indication of
 * whether a freeze event happened while in this function.
 */
146
/*
 * Like schedule(), but should not block the freezer: %current is marked
 * freezer-skippable for the duration of the sleep and re-checked for a
 * pending freeze (via freezer_count()) on wakeup.
 */
#define freezable_schedule()						\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count();						\
})
154
/* Like schedule_timeout_killable(), but should not block the freezer. */
#define freezable_schedule_timeout_killable(timeout)			\
({									\
	long __ret;							\
	freezer_do_not_count();						\
	__ret = schedule_timeout_killable(timeout);			\
	freezer_count();						\
	__ret;								\
})
164
165/*
Jeff Laytonf06ac722011-10-19 15:30:40 -0400166 * Freezer-friendly wrappers around wait_event_interruptible(),
167 * wait_event_killable() and wait_event_interruptible_timeout(), originally
168 * defined in <linux/wait.h>
Rafael J. Wysockie42837b2007-10-18 03:04:45 -0700169 */
170
/* Freezer-friendly wait_event_killable(): skippable while sleeping. */
#define wait_event_freezekillable(wq, condition)			\
({									\
	int __ret;							\
	freezer_do_not_count();						\
	__ret = wait_event_killable(wq, (condition));			\
	freezer_count();						\
	__ret;								\
})
179
/*
 * Freezer-friendly wait_event_interruptible(): also wakes when a freeze
 * request arrives, freezes, and retries the wait afterwards.  Returns 0
 * when @condition became true, -ERESTARTSYS on signal.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
	for (;;) {							\
		__retval = wait_event_interruptible(wq,			\
				(condition) || freezing(current));	\
		/* Stop on signal or satisfied condition ... */		\
		if (__retval || (condition))				\
			break;						\
		/* ... otherwise we were woken to freeze; loop. */	\
		try_to_freeze();					\
	}								\
	__retval;							\
})
192
/*
 * Freezer-friendly wait_event_interruptible_timeout(): also wakes on a
 * freeze request, freezes, then retries with the remaining timeout.
 * Returns the remaining jiffies (>0), 0 on timeout, or -ERESTARTSYS.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	for (;;) {							\
		__retval = wait_event_interruptible_timeout(wq,		\
				(condition) || freezing(current),	\
				__retval);				\
		/* Stop on signal, timeout, or satisfied condition. */	\
		if (__retval <= 0 || (condition))			\
			break;						\
		try_to_freeze();					\
	}								\
	__retval;							\
})
Oleg Nesterov24b7ead2011-11-23 09:28:17 -0800206
Matt Helsley8174f152008-10-18 20:27:19 -0700207#else /* !CONFIG_FREEZER */
/* !CONFIG_FREEZER: freezing never happens; everything is a no-op. */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

/*
 * NOTE(review): try_to_freeze_nowarn() has no counterpart in the
 * CONFIG_FREEZER branch above — confirm it is defined when the freezer
 * is enabled, otherwise freezer-enabled builds that use it will break.
 */
static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }
Nigel Cunningham7dfb7102006-12-06 20:34:23 -0800220
/* Nothing to skip-mark or re-check when the freezer is compiled out. */
static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
/*
 * No freezer, so no task is ever skipped.  Return type is bool/false to
 * match the CONFIG_FREEZER definition of freezer_should_skip() above
 * (the stub previously returned int 0).
 */
static inline bool freezer_should_skip(struct task_struct *p) { return false; }
/*
 * The CONFIG_FREEZER declaration is "extern bool set_freezable(void)";
 * the previous void stub broke compilation for any caller that tests
 * the return value with the freezer disabled.  Report false: with no
 * freezer the task can never actually freeze here.
 */
static inline bool set_freezable(void) { return false; }
Rafael J. Wysockie42837b2007-10-18 03:04:45 -0700225
/*
 * With the freezer compiled out, the freezable wrappers degenerate to
 * the plain scheduler/wait primitives they wrap.
 */
#define freezable_schedule()  schedule()

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define wait_event_freezable(wq, condition)				\
		wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
		wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition)		\
		wait_event_killable(wq, condition)
239
Matt Helsley8174f152008-10-18 20:27:19 -0700240#endif /* !CONFIG_FREEZER */
Rafael J. Wysocki83144182007-07-17 04:03:35 -0700241
242#endif /* FREEZER_H_INCLUDED */