/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by
running various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while the externally visible parts
of these operations still happen sequentially and in order (not
unlike how an out-of-order CPU retires its instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding
to the cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but shares global resources with drivers/subsystems that do
not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before
returning from its init function. This is to maintain strict ordering
between the asynchronous and synchronous parts of the kernel.

*/
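
/*
 * A minimal illustration of the cookie rule above. This is a sketch only:
 * my_dev_probe(), my_hw_discover() and my_register_device() are
 * hypothetical names, not functions defined anywhere in the kernel.
 */
#if 0
static void my_dev_probe(void *data, async_cookie_t cookie)
{
	struct my_device *dev = data;

	/* The slow part: may run out of order with other async probes. */
	my_hw_discover(dev);

	/*
	 * The externally visible part: wait until everything scheduled
	 * before this call has completed, so device numbers still get
	 * registered in order.
	 */
	async_synchronize_cookie(cookie);
	my_register_device(dev);
}
#endif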

#include <linux/async.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

struct async_entry {
	struct list_head list;
	async_cookie_t cookie;
	async_func_ptr *func;
	void *data;
	struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(running)) {
		entry = list_first_entry(running,
					 struct async_entry, list);
		return entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list)
		if (entry->running == running)
			return entry->cookie;

	return next_cookie;	/* "infinity" value */
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */

	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to the running queue */
	list_move_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters. */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* can't run async: execute the function right here */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
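
/*
 * A minimal usage sketch (illustrative only: my_probe() is a hypothetical
 * async_func_ptr, not defined in this file). Because the entry is allocated
 * with GFP_ATOMIC, scheduling is safe even with interrupts disabled:
 */
#if 0
static int __init my_driver_init(void)
{
	/* Boot continues immediately; my_probe() runs in an async thread. */
	async_schedule(my_probe, NULL);

	/*
	 * Note that async_synchronize_cookie(c) waits only for calls
	 * scheduled *before* cookie c; to wait for my_probe() itself,
	 * use async_synchronize_full().
	 */
	return 0;
}
#endif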

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running names the synchronization domain: the async_synchronize_*_domain()
 * functions can then wait on that domain alone rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
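
/*
 * Domain usage sketch (illustrative only: my_domain and my_probe() are
 * hypothetical). A private list_head acts as the synchronization domain:
 */
#if 0
static LIST_HEAD(my_domain);

static void my_setup(void)
{
	async_schedule_domain(my_probe, NULL, &my_domain);

	/* Waits only for work scheduled in my_domain, not global work. */
	async_synchronize_full_domain(&my_domain);
}
#endif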

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
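
/*
 * Per the theory-of-operation note above, init code that schedules async
 * probes but shares global resources with non-async drivers should drain
 * everything before returning. A sketch (my_scan_buses() is hypothetical):
 */
#if 0
static int __init my_subsys_init(void)
{
	my_scan_buses();	/* internally calls async_schedule() */

	/* Keep strict ordering with the synchronous parts of the kernel. */
	async_synchronize_full();
	return 0;
}
#endif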

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);


static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without lock.. false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * We timed out; this means this thread is
			 * redundant, so we sign off and die. But to
			 * avoid any races there is a last-straw check
			 * to see if work snuck in meanwhile.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * Whoops, work came in between us timing out
			 * and signing off; we need to stay alive and
			 * keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

		while (tc < ec && tc < MAX_THREADS) {
			if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
					       tc))) {
				msleep(100);
				continue;
			}
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	async_enabled =
		!IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));

	WARN_ON(!async_enabled);
	return 0;
}

core_initcall(async_init);