ipw2x00: comment typo fix encryptiong to encryption
[linux-2.6-block.git] / kernel / async.c
CommitLineData
22a9d645
AV
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
12
13
/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function, before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
50
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
59
60static async_cookie_t next_cookie = 1;
61
22a9d645
AV
62#define MAX_WORK 32768
63
64static LIST_HEAD(async_pending);
65static LIST_HEAD(async_running);
66static DEFINE_SPINLOCK(async_lock);
67
68struct async_entry {
083b804c
TH
69 struct list_head list;
70 struct work_struct work;
71 async_cookie_t cookie;
72 async_func_ptr *func;
73 void *data;
74 struct list_head *running;
22a9d645
AV
75};
76
77static DECLARE_WAIT_QUEUE_HEAD(async_done);
22a9d645
AV
78
79static atomic_t entry_count;
22a9d645
AV
80
81extern int initcall_debug;
82
83
84/*
85 * MUST be called with the lock held!
86 */
87static async_cookie_t __lowest_in_progress(struct list_head *running)
88{
89 struct async_entry *entry;
d5a877e8 90
37a76bd4
AV
91 if (!list_empty(running)) {
92 entry = list_first_entry(running,
22a9d645 93 struct async_entry, list);
3af968e0 94 return entry->cookie;
22a9d645
AV
95 }
96
3af968e0
LT
97 list_for_each_entry(entry, &async_pending, list)
98 if (entry->running == running)
99 return entry->cookie;
d5a877e8 100
3af968e0 101 return next_cookie; /* "infinity" value */
22a9d645 102}
37a76bd4
AV
103
104static async_cookie_t lowest_in_progress(struct list_head *running)
105{
106 unsigned long flags;
107 async_cookie_t ret;
108
109 spin_lock_irqsave(&async_lock, flags);
110 ret = __lowest_in_progress(running);
111 spin_unlock_irqrestore(&async_lock, flags);
112 return ret;
113}
083b804c 114
22a9d645
AV
115/*
116 * pick the first pending entry and run it
117 */
083b804c 118static void async_run_entry_fn(struct work_struct *work)
22a9d645 119{
083b804c
TH
120 struct async_entry *entry =
121 container_of(work, struct async_entry, work);
22a9d645 122 unsigned long flags;
22a9d645
AV
123 ktime_t calltime, delta, rettime;
124
083b804c 125 /* 1) move self to the running queue */
22a9d645 126 spin_lock_irqsave(&async_lock, flags);
f7de7621 127 list_move_tail(&entry->list, entry->running);
22a9d645
AV
128 spin_unlock_irqrestore(&async_lock, flags);
129
083b804c 130 /* 2) run (and print duration) */
ad160d23 131 if (initcall_debug && system_state == SYSTEM_BOOTING) {
84c15027
PM
132 printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
133 (long long)entry->cookie,
58763a29 134 entry->func, task_pid_nr(current));
22a9d645
AV
135 calltime = ktime_get();
136 }
137 entry->func(entry->data, entry->cookie);
ad160d23 138 if (initcall_debug && system_state == SYSTEM_BOOTING) {
22a9d645
AV
139 rettime = ktime_get();
140 delta = ktime_sub(rettime, calltime);
84c15027 141 printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
58763a29
AM
142 (long long)entry->cookie,
143 entry->func,
144 (long long)ktime_to_ns(delta) >> 10);
22a9d645
AV
145 }
146
083b804c 147 /* 3) remove self from the running queue */
22a9d645
AV
148 spin_lock_irqsave(&async_lock, flags);
149 list_del(&entry->list);
150
083b804c 151 /* 4) free the entry */
22a9d645
AV
152 kfree(entry);
153 atomic_dec(&entry_count);
154
155 spin_unlock_irqrestore(&async_lock, flags);
156
083b804c 157 /* 5) wake up any waiters */
22a9d645 158 wake_up(&async_done);
22a9d645
AV
159}
160
22a9d645
AV
161static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
162{
163 struct async_entry *entry;
164 unsigned long flags;
165 async_cookie_t newcookie;
22a9d645
AV
166
167 /* allow irq-off callers */
168 entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
169
170 /*
171 * If we're out of memory or if there's too much work
172 * pending already, we execute synchronously.
173 */
083b804c 174 if (!entry || atomic_read(&entry_count) > MAX_WORK) {
22a9d645
AV
175 kfree(entry);
176 spin_lock_irqsave(&async_lock, flags);
177 newcookie = next_cookie++;
178 spin_unlock_irqrestore(&async_lock, flags);
179
180 /* low on memory.. run synchronously */
181 ptr(data, newcookie);
182 return newcookie;
183 }
083b804c 184 INIT_WORK(&entry->work, async_run_entry_fn);
22a9d645
AV
185 entry->func = ptr;
186 entry->data = data;
187 entry->running = running;
188
189 spin_lock_irqsave(&async_lock, flags);
190 newcookie = entry->cookie = next_cookie++;
191 list_add_tail(&entry->list, &async_pending);
192 atomic_inc(&entry_count);
193 spin_unlock_irqrestore(&async_lock, flags);
083b804c
TH
194
195 /* schedule for execution */
196 queue_work(system_unbound_wq, &entry->work);
197
22a9d645
AV
198 return newcookie;
199}
200
f30d5b30
CH
201/**
202 * async_schedule - schedule a function for asynchronous execution
203 * @ptr: function to execute asynchronously
204 * @data: data pointer to pass to the function
205 *
206 * Returns an async_cookie_t that may be used for checkpointing later.
207 * Note: This function may be called from atomic or non-atomic contexts.
208 */
22a9d645
AV
209async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
210{
7a89bbc7 211 return __async_schedule(ptr, data, &async_running);
22a9d645
AV
212}
213EXPORT_SYMBOL_GPL(async_schedule);
214
f30d5b30 215/**
766ccb9e 216 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
f30d5b30
CH
217 * @ptr: function to execute asynchronously
218 * @data: data pointer to pass to the function
766ccb9e 219 * @running: running list for the domain
f30d5b30
CH
220 *
221 * Returns an async_cookie_t that may be used for checkpointing later.
766ccb9e
CH
222 * @running may be used in the async_synchronize_*_domain() functions
223 * to wait within a certain synchronization domain rather than globally.
224 * A synchronization domain is specified via the running queue @running to use.
f30d5b30
CH
225 * Note: This function may be called from atomic or non-atomic contexts.
226 */
766ccb9e
CH
227async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
228 struct list_head *running)
22a9d645
AV
229{
230 return __async_schedule(ptr, data, running);
231}
766ccb9e 232EXPORT_SYMBOL_GPL(async_schedule_domain);
22a9d645 233
f30d5b30
CH
234/**
235 * async_synchronize_full - synchronize all asynchronous function calls
236 *
237 * This function waits until all asynchronous function calls have been done.
238 */
22a9d645
AV
239void async_synchronize_full(void)
240{
33b04b93
AV
241 do {
242 async_synchronize_cookie(next_cookie);
243 } while (!list_empty(&async_running) || !list_empty(&async_pending));
22a9d645
AV
244}
245EXPORT_SYMBOL_GPL(async_synchronize_full);
246
f30d5b30 247/**
766ccb9e 248 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
f30d5b30
CH
249 * @list: running list to synchronize on
250 *
766ccb9e
CH
251 * This function waits until all asynchronous function calls for the
252 * synchronization domain specified by the running list @list have been done.
f30d5b30 253 */
766ccb9e 254void async_synchronize_full_domain(struct list_head *list)
22a9d645 255{
766ccb9e 256 async_synchronize_cookie_domain(next_cookie, list);
22a9d645 257}
766ccb9e 258EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
22a9d645 259
f30d5b30 260/**
766ccb9e 261 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
f30d5b30
CH
262 * @cookie: async_cookie_t to use as checkpoint
263 * @running: running list to synchronize on
264 *
766ccb9e
CH
265 * This function waits until all asynchronous function calls for the
266 * synchronization domain specified by the running list @list submitted
267 * prior to @cookie have been done.
f30d5b30 268 */
766ccb9e
CH
269void async_synchronize_cookie_domain(async_cookie_t cookie,
270 struct list_head *running)
22a9d645
AV
271{
272 ktime_t starttime, delta, endtime;
273
ad160d23 274 if (initcall_debug && system_state == SYSTEM_BOOTING) {
84c15027 275 printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
22a9d645
AV
276 starttime = ktime_get();
277 }
278
37a76bd4 279 wait_event(async_done, lowest_in_progress(running) >= cookie);
22a9d645 280
ad160d23 281 if (initcall_debug && system_state == SYSTEM_BOOTING) {
22a9d645
AV
282 endtime = ktime_get();
283 delta = ktime_sub(endtime, starttime);
284
84c15027 285 printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
58763a29
AM
286 task_pid_nr(current),
287 (long long)ktime_to_ns(delta) >> 10);
22a9d645
AV
288 }
289}
766ccb9e 290EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
22a9d645 291
f30d5b30
CH
292/**
293 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
294 * @cookie: async_cookie_t to use as checkpoint
295 *
296 * This function waits until all asynchronous function calls prior to @cookie
297 * have been done.
298 */
22a9d645
AV
299void async_synchronize_cookie(async_cookie_t cookie)
300{
766ccb9e 301 async_synchronize_cookie_domain(cookie, &async_running);
22a9d645
AV
302}
303EXPORT_SYMBOL_GPL(async_synchronize_cookie);