async: Introduce async_schedule_dev_nocall()
[linux-2.6-block.git] / kernel / async.c
CommitLineData
b886d83c 1// SPDX-License-Identifier: GPL-2.0-only
22a9d645
AV
2/*
3 * async.c: Asynchronous function calls for boot performance
4 *
5 * (C) Copyright 2009 Intel Corporation
6 * Author: Arjan van de Ven <arjan@linux.intel.com>
22a9d645
AV
7 */
8
9
10/*
11
12Goals and Theory of Operation
13
14The primary goal of this feature is to reduce the kernel boot time,
15by doing various independent hardware delays and discovery operations
16decoupled and not strictly serialized.
17
18More specifically, the asynchronous function call concept allows
19certain operations (primarily during system boot) to happen
20asynchronously, out of order, while these operations still
21have their externally visible parts happen sequentially and in-order.
22(not unlike how out-of-order CPUs retire their instructions in order)
23
24Key to the asynchronous function call implementation is the concept of
25a "sequence cookie" (which, although it has an abstracted type, can be
26thought of as a monotonically incrementing number).
27
28The async core will assign each scheduled event such a sequence cookie and
29pass this to the called functions.
30
The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.
37
Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.
44
45*/
46
47#include <linux/async.h>
84c15027
PM
48#include <linux/atomic.h>
49#include <linux/ktime.h>
9984de1a 50#include <linux/export.h>
22a9d645
AV
51#include <linux/wait.h>
52#include <linux/sched.h>
5a0e3ad6 53#include <linux/slab.h>
083b804c 54#include <linux/workqueue.h>
22a9d645 55
84b233ad
TH
56#include "workqueue_internal.h"
57
22a9d645
AV
/* Next cookie to hand out; incremented under async_lock. */
static async_cookie_t next_cookie = 1;

/* Above this many pending entries, new work is run synchronously instead. */
#define MAX_WORK 32768
#define ASYNC_COOKIE_MAX ULLONG_MAX /* infinity cookie */

static LIST_HEAD(async_global_pending); /* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain); /* default domain when callers don't supply one */
/* Protects next_cookie and both kinds of pending lists. */
static DEFINE_SPINLOCK(async_lock);
66
/* One scheduled asynchronous call; allocated per async_schedule_*() call. */
struct async_entry {
	struct list_head domain_list;	/* link in domain->pending */
	struct list_head global_list;	/* link in async_global_pending (registered domains only) */
	struct work_struct work;	/* runs async_run_entry_fn */
	async_cookie_t cookie;		/* ordering cookie taken from next_cookie */
	async_func_t func;		/* function to call asynchronously */
	void *data;			/* argument passed to @func */
	struct async_domain *domain;	/* synchronization domain this entry belongs to */
};
76
/* Woken after each entry completes; waited on by the synchronize functions. */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* Number of allocated-but-not-yet-freed entries; read locklessly for throttling. */
static atomic_t entry_count;
22a9d645 80
07416af1
RV
81static long long microseconds_since(ktime_t start)
82{
83 ktime_t now = ktime_get();
84 return ktime_to_ns(ktime_sub(now, start)) >> 10;
85}
86
8723d503 87static async_cookie_t lowest_in_progress(struct async_domain *domain)
37a76bd4 88{
4f7e988e 89 struct async_entry *first = NULL;
52722794 90 async_cookie_t ret = ASYNC_COOKIE_MAX;
37a76bd4 91 unsigned long flags;
37a76bd4
AV
92
93 spin_lock_irqsave(&async_lock, flags);
9fdb04cd 94
4f7e988e
RV
95 if (domain) {
96 if (!list_empty(&domain->pending))
97 first = list_first_entry(&domain->pending,
98 struct async_entry, domain_list);
99 } else {
100 if (!list_empty(&async_global_pending))
101 first = list_first_entry(&async_global_pending,
102 struct async_entry, global_list);
103 }
9fdb04cd 104
4f7e988e
RV
105 if (first)
106 ret = first->cookie;
9fdb04cd 107
37a76bd4
AV
108 spin_unlock_irqrestore(&async_lock, flags);
109 return ret;
110}
083b804c 111
22a9d645
AV
/*
 * Workqueue callback: run one scheduled entry, then retire it.
 *
 * The entry stays on its pending list(s) for the whole duration of the
 * call so that lowest_in_progress() keeps reporting its cookie until the
 * function has actually finished; it is unlinked and freed only afterwards,
 * under async_lock, and any synchronize waiters are woken last.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}
147
6aa09a5b
RW
/*
 * Fill in the caller-allocated @entry, assign it the next cookie, queue it
 * on @domain's pending list (and, for registered domains, on the global
 * pending list) and hand it to the unbound workqueue, preferring @node
 * for execution.  Returns the entry's cookie.
 *
 * async_lock is taken with IRQs saved, so this part is usable from the
 * atomic contexts the callers allow for.
 */
static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	/* only entries in registered domains take part in global sync */
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
180
6be9238e
AD
181/**
182 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
183 * @func: function to execute asynchronously
184 * @data: data pointer to pass to the function
185 * @node: NUMA node that we want to schedule this on or close to
186 * @domain: the domain
187 *
188 * Returns an async_cookie_t that may be used for checkpointing later.
189 * @domain may be used in the async_synchronize_*_domain() functions to
190 * wait within a certain synchronization domain rather than globally.
191 *
192 * Note: This function may be called from atomic or non-atomic contexts.
193 *
194 * The node requested will be honored on a best effort basis. If the node
195 * has no CPUs associated with it then the work is distributed among all
196 * available CPUs.
197 */
198async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
199 int node, struct async_domain *domain)
22a9d645
AV
200{
201 struct async_entry *entry;
202 unsigned long flags;
203 async_cookie_t newcookie;
22a9d645
AV
204
205 /* allow irq-off callers */
206 entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
207
208 /*
209 * If we're out of memory or if there's too much work
210 * pending already, we execute synchronously.
211 */
083b804c 212 if (!entry || atomic_read(&entry_count) > MAX_WORK) {
22a9d645
AV
213 kfree(entry);
214 spin_lock_irqsave(&async_lock, flags);
215 newcookie = next_cookie++;
216 spin_unlock_irqrestore(&async_lock, flags);
217
218 /* low on memory.. run synchronously */
362f2b09 219 func(data, newcookie);
22a9d645
AV
220 return newcookie;
221 }
9fdb04cd 222
6aa09a5b 223 return __async_schedule_node_domain(func, data, node, domain, entry);
22a9d645 224}
6be9238e 225EXPORT_SYMBOL_GPL(async_schedule_node_domain);
22a9d645 226
/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	/* Same as async_schedule_node_domain(), just in the default domain. */
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
22a9d645 245
7d4b5d7a
RW
246/**
247 * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
248 * @func: function to execute asynchronously
249 * @dev: device argument to be passed to function
250 *
251 * @dev is used as both the argument for the function and to provide NUMA
252 * context for where to run the function.
253 *
254 * If the asynchronous execution of @func is scheduled successfully, return
255 * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
256 * that will run the function synchronously then.
257 */
258bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
259{
260 struct async_entry *entry;
261
262 entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
263
264 /* Give up if there is no memory or too much work. */
265 if (!entry || atomic_read(&entry_count) > MAX_WORK) {
266 kfree(entry);
267 return false;
268 }
269
270 __async_schedule_node_domain(func, dev, dev_to_node(dev),
271 &async_dfl_domain, entry);
272 return true;
273}
274
f30d5b30
CH
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	/* A NULL domain means "wait on every registered domain". */
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
285
/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	/* ASYNC_COOKIE_MAX checkpoints past every cookie ever issued. */
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
22a9d645 298
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	/*
	 * Entries are unlinked from the pending lists only after their
	 * function has run (see async_run_entry_fn()), so once the lowest
	 * pending cookie reaches @cookie, every earlier call has fully
	 * completed.  async_done is woken on each completion.
	 */
	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
22a9d645 321
f30d5b30
CH
/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	/* Checkpoint against the default domain. */
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
84b233ad
TH
334
335/**
336 * current_is_async - is %current an async worker task?
337 *
338 * Returns %true if %current is an async worker task.
339 */
340bool current_is_async(void)
341{
342 struct worker *worker = current_wq_worker();
343
344 return worker && worker->current_func == async_run_entry_fn;
345}
581da2ca 346EXPORT_SYMBOL_GPL(current_is_async);