#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

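/* Sequence counter used by bdi_setup_and_register() to generate unique names. */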
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

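/*
 * Take two writeback list_locks in a fixed order (lowest address first)
 * so that concurrent lockers of the same pair cannot deadlock.
 */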
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
        if (wb1 < wb2) {
                spin_lock(&wb1->list_lock);
                spin_lock_nested(&wb2->list_lock, 1);
        } else {
                spin_lock(&wb2->list_lock);
                spin_lock_nested(&wb1->list_lock, 1);
        }
}

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

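/*
 * Emit the bdi's writeback counters and thresholds as
 * <debugfs>/bdi/<device>/stats.
 */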
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
                   (unsigned long) K(bdi->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

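/*
 * sysfs handlers: read_ahead_kb is stored internally in pages, so the
 * store path converts KB to pages (and the show path back again via K()).
 */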
static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

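/*
 * The min_ratio/max_ratio device attributes bound the fraction of the
 * global dirty threshold that this bdi may consume.
 */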
static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
        BUG_ON(IS_ERR(sync_supers_tsk));

        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
        bdi_arm_supers_timer();

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
        set_user_nice(current, 0);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /*
                 * Do this periodically, like kupdated() did before.
                 */
                sync_supers();
        }

        return 0;
}

void bdi_arm_supers_timer(void)
{
        unsigned long next;

        if (!dirty_writeback_interval)
                return;

        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
        mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
        wake_up_process(sync_supers_tsk);
        bdi_arm_supers_timer();
}

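/*
 * Timer callback for the per-bdi wakeup timer armed by
 * bdi_wakeup_thread_delayed() below.
 */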
static void wakeup_timer_fn(unsigned long data)
{
        struct backing_dev_info *bdi = (struct backing_dev_info *)data;

        spin_lock_bh(&bdi->wb_lock);
        if (bdi->wb.task) {
                trace_writeback_wake_thread(bdi);
                wake_up_process(bdi->wb.task);
        } else if (bdi->dev) {
                /*
                 * When bdi tasks are inactive for a long time, they are
                 * killed. In this case we have to wake up the forker thread,
                 * which should create and run the bdi thread.
                 */
                trace_writeback_wake_forker_thread(bdi);
                wake_up_process(default_backing_dev_info.wb.task);
        }
        spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
        unsigned long interval;

        interval = msecs_to_jiffies(dirty_writeback_interval * 10);
        return max(5UL * 60 * HZ, interval);
}

/*
 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
 * shutdown
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
        clear_bit(BDI_pending, &bdi->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bdi->state, BDI_pending);
}

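/*
 * The forker thread services the default bdi: it forks per-bdi flusher
 * threads when a bdi gets dirty work and kills threads that have been
 * idle for longer than bdi_longest_inactive().
 */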
static int bdi_forker_thread(void *ptr)
{
        struct bdi_writeback *me = ptr;

        current->flags |= PF_SWAPWRITE;
        set_freezable();

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        for (;;) {
                struct task_struct *task = NULL;
                struct backing_dev_info *bdi;
                enum {
                        NO_ACTION,   /* Nothing to do */
                        FORK_THREAD, /* Fork bdi thread */
                        KILL_THREAD, /* Kill inactive bdi thread */
                } action = NO_ACTION;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
                        del_timer(&me->wakeup_timer);
                        wb_do_writeback(me, 0);
                }

                spin_lock_bh(&bdi_lock);
                /*
                 * In the following loop we are going to check whether we have
                 * some work to do without any synchronization with tasks
                 * waking us up to do work for them. Set the task state here
                 * so that we don't miss wakeups after verifying conditions.
                 */
                set_current_state(TASK_INTERRUPTIBLE);

                list_for_each_entry(bdi, &bdi_list, bdi_list) {
                        bool have_dirty_io;

                        if (!bdi_cap_writeback_dirty(bdi) ||
                             bdi_cap_flush_forker(bdi))
                                continue;

                        WARN(!test_bit(BDI_registered, &bdi->state),
                             "bdi %p/%s is not registered!\n", bdi, bdi->name);

                        have_dirty_io = !list_empty(&bdi->work_list) ||
                                        wb_has_dirty_io(&bdi->wb);

                        /*
                         * If the bdi has work to do, but the thread does not
                         * exist - create it.
                         */
                        if (!bdi->wb.task && have_dirty_io) {
                                /*
                                 * Set the pending bit - if someone tries to
                                 * unregister this bdi, it'll wait on this bit.
                                 */
                                set_bit(BDI_pending, &bdi->state);
                                action = FORK_THREAD;
                                break;
                        }

                        spin_lock(&bdi->wb_lock);

                        /*
                         * If there is no work to do and the bdi thread was
                         * inactive long enough - kill it. The wb_lock is taken
                         * to make sure no-one adds more work to this bdi and
                         * wakes the bdi thread up.
                         */
                        if (bdi->wb.task && !have_dirty_io &&
                            time_after(jiffies, bdi->wb.last_active +
                                                bdi_longest_inactive())) {
                                task = bdi->wb.task;
                                bdi->wb.task = NULL;
                                spin_unlock(&bdi->wb_lock);
                                set_bit(BDI_pending, &bdi->state);
                                action = KILL_THREAD;
                                break;
                        }
                        spin_unlock(&bdi->wb_lock);
                }
                spin_unlock_bh(&bdi_lock);

                /* Keep working if default bdi still has things to do */
                if (!list_empty(&me->bdi->work_list))
                        __set_current_state(TASK_RUNNING);

                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
                        task = kthread_create(bdi_writeback_thread, &bdi->wb,
                                              "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
                                 * the bdi from the thread. Hopefully 1024 is
                                 * large enough for efficient IO.
                                 */
                                writeback_inodes_wb(&bdi->wb, 1024,
                                                    WB_REASON_FORKER_THREAD);
                        } else {
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
                                 * And as soon as the bdi thread is visible, we
                                 * can start it.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
                                wake_up_process(task);
                        }
                        bdi_clear_pending(bdi);
                        break;

                case KILL_THREAD:
                        __set_current_state(TASK_RUNNING);
                        kthread_stop(task);
                        bdi_clear_pending(bdi);
                        break;

                case NO_ACTION:
                        if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
                                /*
                                 * There is no dirty data. The only thing we
                                 * should now care about is checking for
                                 * inactive bdi threads and killing them. Thus,
                                 * let's sleep for a longer time, save energy
                                 * and be friendly for battery-driven devices.
                                 */
                                schedule_timeout(bdi_longest_inactive());
                        else
                                schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                        try_to_freeze();
                        break;
                }
        }

        return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
                                       dev_name(dev));
                if (IS_ERR(wb->task))
                        return PTR_ERR(wb->task);
        }

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        struct task_struct *task;

        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                        TASK_UNINTERRUPTIBLE);

        /*
         * Finally, kill the kernel thread. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility.
         */
        spin_lock_bh(&bdi->wb_lock);
        task = bdi->wb.task;
        bdi->wb.task = NULL;
        spin_unlock_bh(&bdi->wb_lock);

        if (task)
                kthread_stop(task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdi == bdi)
                        sb->s_bdi = &default_backing_dev_info;
        }
        spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        struct device *dev = bdi->dev;

        if (dev) {
                bdi_set_min_ratio(bdi, 0);
                trace_writeback_bdi_unregister(bdi);
                bdi_prune_sb(bdi);
                del_timer_sync(&bdi->wb.wakeup_timer);

                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);

                spin_lock_bh(&bdi->wb_lock);
                bdi->dev = NULL;
                spin_unlock_bh(&bdi->wb_lock);

                device_unregister(dev);
        }
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        spin_lock_init(&wb->list_lock);
        setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))

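/*
 * Fully initialise a bdi: locks, lists, writeback state, bandwidth
 * estimates and the per-cpu bdi_stat counters.
 */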
int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;

        bdi->bw_time_stamp = jiffies;
        bdi->written_stamp = 0;

        bdi->balanced_dirty_ratelimit = INIT_BW;
        bdi->dirty_ratelimit = INIT_BW;
        bdi->write_bandwidth = INIT_BW;
        bdi->avg_write_bandwidth = INIT_BW;

        err = fprop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

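/*
 * Tear down a bdi that was set up with bdi_init(): any remaining dirty
 * inodes are handed over to the default bdi before the counters go away.
 */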
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        /*
         * Splice our entries to the default_backing_dev_info, if this
         * bdi disappears
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;

                bdi_lock_two(&bdi->wb, dst);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&bdi->wb.list_lock);
                spin_unlock(&dst->list_lock);
        }

        bdi_unregister(bdi);

        /*
         * If bdi_unregister() had already been called earlier, the
         * wakeup_timer could still be armed because bdi_prune_sb()
         * can race with the bdi_wakeup_thread_delayed() calls from
         * __mark_inode_dirty().
         */
        del_timer_sync(&bdi->wb.wakeup_timer);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                           unsigned int cap)
{
        char tmp[32];
        int err;

        bdi->name = name;
        bdi->capabilities = cap;
        err = bdi_init(bdi);
        if (err)
                return err;

        sprintf(tmp, "%.28s%s", name, "-%d");
        err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

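/*
 * Congestion state: one wait queue and congested-bdi counter per
 * direction, indexed by sync (BLK_RW_ASYNC = 0, BLK_RW_SYNC = 1).
 */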
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (test_and_clear_bit(bit, &bdi->state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (!test_and_set_bit(bit, &bdi->state))
                atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
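
/*
 * A typical caller backs off briefly when everything is congested,
 * e.g. (sketch):
 *
 *      congestion_wait(BLK_RW_ASYNC, HZ/10);
 */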

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current zone, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
            !zone_is_reclaim_congested(zone)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                           jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);