/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while their externally visible parts
still happen sequentially and in-order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
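
/*
 * A minimal usage sketch of the API described above, assuming a
 * hypothetical driver: my_dev, my_dev_probe(), my_hw_init() and
 * my_dev_register() are illustrative placeholders, not functions defined
 * in this file. The slow hardware work may run out of order; the globally
 * visible registration step is kept in-order by synchronizing on the
 * entry's own cookie.
 *
 *	static void my_dev_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_hw_init(dev);	// slow part, may run out of order
 *
 *		// wait for everything scheduled before us, then do the
 *		// externally visible part in-order
 *		async_synchronize_cookie(cookie);
 *		my_dev_register(dev);
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_dev_probe, &my_dev_instance);
 *		// shares resources with non-async code, so drain before returning
 *		async_synchronize_full();
 *		return 0;
 *	}
 */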

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *domain)
{
	async_cookie_t first_running = ASYNC_COOKIE_MAX;
	async_cookie_t first_pending = ASYNC_COOKIE_MAX;
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&domain->running)) {
		entry = list_first_entry(&domain->running, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->domain == domain) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

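/*
 * Locked wrapper around __lowest_in_progress(); takes async_lock itself,
 * so it can be called without the lock held.
 */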
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(domain);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *domain = entry->domain;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &domain->running, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (domain->registered && --domain->count == 0)
		list_del_init(&domain->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

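/*
 * Common backend for async_schedule() and async_schedule_domain():
 * allocate an entry, assign the next cookie, put the entry on the pending
 * list and hand it to the unbound workqueue. If allocation fails or too
 * much async work is already pending, the function is simply run
 * synchronously instead.
 */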
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (domain->registered && domain->count++ == 0)
		list_add_tail(&domain->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(ptr, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
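
/*
 * A minimal sketch of per-domain usage, assuming a hypothetical subsystem:
 * my_domain and my_probe_fn are illustrative placeholders. Work scheduled
 * on a private domain can be waited for without also waiting for unrelated
 * async work elsewhere in the kernel.
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe_fn, dev, &my_domain);
 *	...
 *	async_synchronize_full_domain(&my_domain);
 */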

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->running));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!domain)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}