/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by running various independent hardware delays and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
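
/*
 * Illustrative sketch of the protocol described above.  This is not part
 * of this file; my_dev_probe(), my_dev_reset() and my_dev_register() are
 * hypothetical names.  The slow, order-independent hardware work runs
 * first; before the globally visible registration step, the function
 * waits until every operation scheduled before it has completed:
 *
 *	static void my_dev_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_reset(dev);
 *		async_synchronize_cookie(cookie);
 *		my_dev_register(dev);
 *	}
 */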

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

/* cap on outstanding entries; above this, calls run synchronously */
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* number of async entries currently allocated */
static atomic_t entry_count;


/*
 * Must be called with async_lock held.
 */
static async_cookie_t __lowest_in_progress(struct async_domain *domain)
{
	async_cookie_t first_running = next_cookie;	/* infinity value */
	async_cookie_t first_pending = next_cookie;	/* ditto */
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&domain->running)) {
		entry = list_first_entry(&domain->running, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->domain == domain) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(domain);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Execute one async entry: move it to its domain's running queue,
 * call the function, then remove and free the entry.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *domain = entry->domain;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &domain->running, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (domain->registered && --domain->count == 0)
		list_del_init(&domain->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data,
				       struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory or too busy: run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (domain->registered && domain->count++ == 0)
		list_add_tail(&domain->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);
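
/*
 * Usage sketch (my_dev_probe and my_dev are hypothetical names):
 *
 *	async_cookie_t cookie = async_schedule(my_dev_probe, &my_dev);
 *
 * Passing the returned cookie to async_synchronize_cookie() later waits
 * for everything scheduled before this call, not for the call itself.
 */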

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(ptr, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
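
/*
 * Usage sketch (hypothetical names): a private domain limits what the
 * domain-wide synchronization calls have to wait for:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_dev_probe, &my_dev, &my_domain);
 *	async_synchronize_full_domain(&my_domain);
 */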

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
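
/*
 * Usage sketch, matching the rule stated at the top of this file: init
 * code that scheduled async probes but shares global resources with
 * non-async code must fully synchronize before returning (my_init,
 * my_dev_probe and my_dev are hypothetical names):
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_dev_probe, &my_dev);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */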

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->running));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
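
/*
 * Lifetime sketch (hypothetical names).  A domain declared with
 * ASYNC_DOMAIN() is registered, so async_synchronize_full() may wait on
 * it; drain and unregister it before its storage goes away.  Domains
 * declared with ASYNC_DOMAIN_EXCLUSIVE() are never registered and skip
 * this step:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	static void my_teardown(void)
 *	{
 *		async_synchronize_full_domain(&my_domain);
 *		async_unregister_domain(&my_domain);
 *	}
 */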

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!domain)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
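
/*
 * Sketch of the "retire in order" pattern (hypothetical names): two
 * probes scheduled back to back may run concurrently, but if each calls
 * async_synchronize_cookie() with its own cookie before its globally
 * visible step, those steps happen in submission order:
 *
 *	async_schedule(my_dev_probe, &my_dev_a);
 *	async_schedule(my_dev_probe, &my_dev_b);
 */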

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
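
/*
 * Usage sketch: code shared between synchronous and asynchronous paths
 * can branch on this, e.g. to avoid waiting on the async machinery from
 * inside an async worker, which could deadlock:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */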