// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running various independent hardware delay and discovery operations
decoupled rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
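
/*
 * A minimal usage sketch (hypothetical names, not part of this file):
 * sample_hw_init() is slow and may run out of order relative to other
 * async work, while the globally visible sample_register() step is
 * ordered by the cookie:
 *
 *	static void sample_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct sample_device *sdev = data;
 *
 *		sample_hw_init(sdev);
 *		async_synchronize_cookie(cookie);
 *		sample_register(sdev);
 *	}
 *
 *	async_schedule(sample_probe, sdev);
 */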

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);
static struct workqueue_struct *async_wq;

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
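	/* ns -> us approximation: >> 10 divides by 1024 rather than 1000 */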
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}

/*
 * Return the cookie of the oldest pending entry in @domain, or in any
 * registered domain when @domain is NULL; ASYNC_COOKIE_MAX if nothing
 * is pending. The pending lists are ordered by cookie, so the first
 * entry is the lowest.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * run the entry that this work item was queued for
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling  %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, async_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* out of memory or overloaded: run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
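
/*
 * A minimal usage sketch (hypothetical names, not part of this file): a
 * subsystem that only needs to order its own probes can schedule them
 * into a private domain and then wait for just that domain:
 *
 *	static ASYNC_DOMAIN(sample_domain);
 *
 *	for (i = 0; i < n; i++)
 *		async_schedule_node_domain(sample_probe, &sample_devs[i],
 *					   dev_to_node(&sample_devs[i].dev),
 *					   &sample_domain);
 *	async_synchronize_full_domain(&sample_domain);
 */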

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
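
/*
 * For example, a hypothetical caller can keep the probe work NUMA-local
 * to the device being probed:
 *
 *	async_schedule_node(sample_probe, dev, dev_to_node(dev));
 */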

/**
 * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
 * @func: function to execute asynchronously
 * @dev: device argument to be passed to function
 *
 * @dev is used as both the argument for the function and to provide NUMA
 * context for where to run the function.
 *
 * If the asynchronous execution of @func is scheduled successfully, return
 * true. Otherwise, do nothing and return false, unlike async_schedule_dev(),
 * which would run the function synchronously in that case.
 */
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Give up if there is no memory or too much work. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}
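
/*
 * A hypothetical caller must handle the "nothing happened" case itself,
 * e.g. by falling back to a direct synchronous call (0 is shown as a
 * placeholder cookie here):
 *
 *	if (!async_schedule_dev_nocall(sample_probe, dev))
 *		sample_probe(dev, 0);
 */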

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
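
/*
 * As described in the theory-of-operation comment above, a hypothetical
 * init function that schedules async probes but shares global resources
 * with non-async code should drain all async work before returning:
 *
 *	static int __init sample_init(void)
 *	{
 *		async_schedule(sample_probe, &sample_dev);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */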

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
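
/*
 * A hypothetical use: code that can run either synchronously or from an
 * async worker should not wait for all async work while being an async
 * worker itself, or it may deadlock waiting on its own completion:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */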

void __init async_init(void)
{
	/*
	 * Async can schedule a number of interdependent work items. However,
	 * unbound workqueues can handle only up to min_active interdependent
	 * work items. The default min_active of 8 isn't sufficient for async
	 * and can lead to stalls. Let's use a dedicated workqueue with raised
	 * min_active.
	 */
	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
	BUG_ON(!async_wq);
	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}