Merge tag 'backlight-next-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/lee...
[linux-block.git] / kernel / async.c
CommitLineData
b886d83c 1// SPDX-License-Identifier: GPL-2.0-only
22a9d645
AV
2/*
3 * async.c: Asynchronous function calls for boot performance
4 *
5 * (C) Copyright 2009 Intel Corporation
6 * Author: Arjan van de Ven <arjan@linux.intel.com>
22a9d645
AV
7 */
8
9
10/*
11
12Goals and Theory of Operation
13
14The primary goal of this feature is to reduce the kernel boot time,
15by doing various independent hardware delays and discovery operations
16decoupled and not strictly serialized.
17
18More specifically, the asynchronous function call concept allows
19certain operations (primarily during system boot) to happen
20asynchronously, out of order, while these operations still
21have their externally visible parts happen sequentially and in-order.
22(not unlike how out-of-order CPUs retire their instructions in order)
23
24Key to the asynchronous function call implementation is the concept of
25a "sequence cookie" (which, although it has an abstracted type, can be
26thought of as a monotonically incrementing number).
27
28The async core will assign each scheduled event such a sequence cookie and
29pass this to the called functions.
30
Before performing a globally visible operation, such as registering
device numbers, an asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with
the cookie have completed.
37
Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.
44
45*/
46
47#include <linux/async.h>
84c15027
PM
48#include <linux/atomic.h>
49#include <linux/ktime.h>
9984de1a 50#include <linux/export.h>
22a9d645
AV
51#include <linux/wait.h>
52#include <linux/sched.h>
5a0e3ad6 53#include <linux/slab.h>
083b804c 54#include <linux/workqueue.h>
22a9d645 55
84b233ad
TH
56#include "workqueue_internal.h"
57
22a9d645
AV
/* Next sequence cookie to hand out; monotonically increasing, guarded by async_lock. */
static async_cookie_t next_cookie = 1;

/* Above this many in-flight entries, new work is run synchronously instead. */
#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);	/* default domain for async_schedule() */
/* Protects next_cookie, both pending lists, and entry lifetime (see run fn). */
static DEFINE_SPINLOCK(async_lock);

/*
 * One scheduled asynchronous call. Linked on its domain's pending list
 * and, if the domain is registered, also on async_global_pending; both
 * lists are kept in cookie (i.e. submission) order.
 */
struct async_entry {
	struct list_head	domain_list;	/* node on domain->pending */
	struct list_head	global_list;	/* node on async_global_pending */
	struct work_struct	work;		/* executes async_run_entry_fn */
	async_cookie_t		cookie;		/* this entry's sequence cookie */
	async_func_t		func;		/* function to call */
	void			*data;		/* opaque argument for func */
	struct async_domain	*domain;	/* owning synchronization domain */
};

/* Woken whenever an entry completes; waited on by async_synchronize_*(). */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* Number of entries currently queued; bounds async work via MAX_WORK. */
static atomic_t entry_count;
22a9d645 80
07416af1
RV
81static long long microseconds_since(ktime_t start)
82{
83 ktime_t now = ktime_get();
84 return ktime_to_ns(ktime_sub(now, start)) >> 10;
85}
86
8723d503 87static async_cookie_t lowest_in_progress(struct async_domain *domain)
37a76bd4 88{
4f7e988e 89 struct async_entry *first = NULL;
52722794 90 async_cookie_t ret = ASYNC_COOKIE_MAX;
37a76bd4 91 unsigned long flags;
37a76bd4
AV
92
93 spin_lock_irqsave(&async_lock, flags);
9fdb04cd 94
4f7e988e
RV
95 if (domain) {
96 if (!list_empty(&domain->pending))
97 first = list_first_entry(&domain->pending,
98 struct async_entry, domain_list);
99 } else {
100 if (!list_empty(&async_global_pending))
101 first = list_first_entry(&async_global_pending,
102 struct async_entry, global_list);
103 }
9fdb04cd 104
4f7e988e
RV
105 if (first)
106 ret = first->cookie;
9fdb04cd 107
37a76bd4
AV
108 spin_unlock_irqrestore(&async_lock, flags);
109 return ret;
110}
083b804c 111
22a9d645
AV
/*
 * Workqueue callback: run one scheduled entry, retire it from the pending
 * lists, and wake any synchronizers waiting on async_done.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	/* list_del_init: entry may be on only one list if domain unregistered */
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/*
	 * 3) free the entry
	 * Freeing while still holding async_lock ensures lowest_in_progress()
	 * can never observe a retired entry.
	 */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}
147
6be9238e
AD
/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);	/* kfree(NULL) is a no-op on the OOM path */
		/* still consume a cookie so ordering guarantees hold */
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	/* unregistered domains are excluded from global (NULL) synchronization */
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
22a9d645 214
/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	/* convenience wrapper: schedule into the default domain */
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
22a9d645 233
f30d5b30
CH
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	/* NULL domain means: wait on every registered domain */
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
244
/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	/* ASYNC_COOKIE_MAX: wait for everything ever scheduled in @domain */
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
22a9d645 257
f30d5b30 258/**
766ccb9e 259 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
f30d5b30 260 * @cookie: async_cookie_t to use as checkpoint
9fdb04cd 261 * @domain: the domain to synchronize (%NULL for all registered domains)
f30d5b30 262 *
766ccb9e 263 * This function waits until all asynchronous function calls for the
8723d503
TH
264 * synchronization domain specified by @domain submitted prior to @cookie
265 * have been done.
f30d5b30 266 */
8723d503 267void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
22a9d645 268{
07416af1 269 ktime_t starttime;
22a9d645 270
07416af1
RV
271 pr_debug("async_waiting @ %i\n", task_pid_nr(current));
272 starttime = ktime_get();
22a9d645 273
8723d503 274 wait_event(async_done, lowest_in_progress(domain) >= cookie);
22a9d645 275
07416af1
RV
276 pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
277 microseconds_since(starttime));
22a9d645 278}
766ccb9e 279EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
22a9d645 280
f30d5b30
CH
/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	/* convenience wrapper over the default domain */
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
84b233ad
TH
293
294/**
295 * current_is_async - is %current an async worker task?
296 *
297 * Returns %true if %current is an async worker task.
298 */
299bool current_is_async(void)
300{
301 struct worker *worker = current_wq_worker();
302
303 return worker && worker->current_func == async_run_entry_fn;
304}
581da2ca 305EXPORT_SYMBOL_GPL(current_is_async);