1 /*
2  * fs/fs-writeback.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains all the functions related to writing back and waiting
7  * upon dirty inodes against superblocks, and writing back dirty
8  * pages against inodes, i.e. data writeback.  Writeout of the
9  * inode itself is not handled here.
10  *
11  * 10Apr2002    Andrew Morton
12  *              Split out of fs/inode.c
13  *              Additions for address_space-based writeback
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/sched.h>
20 #include <linux/fs.h>
21 #include <linux/mm.h>
22 #include <linux/kthread.h>
23 #include <linux/freezer.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/backing-dev.h>
27 #include <linux/buffer_head.h>
28 #include "internal.h"
29
30 #define inode_to_bdi(inode)     ((inode)->i_mapping->backing_dev_info)
31
32 /*
33  * We don't actually have pdflush, but this one is exported through /proc...
34  */
35 int nr_pdflush_threads;
36
37 /*
38  * Passed into wb_writeback(), essentially a subset of writeback_control
39  */
40 struct wb_writeback_args {
41         long nr_pages;
42         struct super_block *sb;
43         enum writeback_sync_modes sync_mode;
44         int for_kupdate:1;
45         int range_cyclic:1;
46         int for_background:1;
47 };
48
49 /*
50  * Work items for the bdi_writeback threads
51  */
52 struct bdi_work {
53         struct list_head list;          /* pending work list */
54         struct rcu_head rcu_head;       /* for RCU free/clear of work */
55
56         unsigned long seen;             /* threads that have seen this work */
57         atomic_t pending;               /* number of threads still to do work */
58
59         struct wb_writeback_args args;  /* writeback arguments */
60
61         unsigned long state;            /* flag bits, see WS_* */
62 };
63
64 enum {
65         WS_USED_B = 0,
66         WS_ONSTACK_B,
67 };
68
69 #define WS_USED (1 << WS_USED_B)
70 #define WS_ONSTACK (1 << WS_ONSTACK_B)
71
72 static inline bool bdi_work_on_stack(struct bdi_work *work)
73 {
74         return test_bit(WS_ONSTACK_B, &work->state);
75 }
76
77 static inline void bdi_work_init(struct bdi_work *work,
78                                  struct wb_writeback_args *args)
79 {
80         INIT_RCU_HEAD(&work->rcu_head);
81         work->args = *args;
82         work->state = WS_USED;
83 }
84
85 /**
86  * writeback_in_progress - determine whether there is writeback in progress
87  * @bdi: the device's backing_dev_info structure.
88  *
89  * Determine whether there is writeback waiting to be handled against a
90  * backing device.
91  */
92 int writeback_in_progress(struct backing_dev_info *bdi)
93 {
94         return !list_empty(&bdi->work_list);
95 }
96
97 static void bdi_work_clear(struct bdi_work *work)
98 {
99         clear_bit(WS_USED_B, &work->state);
100         smp_mb__after_clear_bit();
101         /*
102          * work can have disappeared at this point. bit waitq functions
103          * should be able to tolerate this, provided bdi_sched_wait does
104          * not dereference its pointer argument.
105          */
106         wake_up_bit(&work->state, WS_USED_B);
107 }
108
109 static void bdi_work_free(struct rcu_head *head)
110 {
111         struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
112
113         if (!bdi_work_on_stack(work))
114                 kfree(work);
115         else
116                 bdi_work_clear(work);
117 }
118
119 static void wb_work_complete(struct bdi_work *work)
120 {
121         const enum writeback_sync_modes sync_mode = work->args.sync_mode;
122         int onstack = bdi_work_on_stack(work);
123
124         /*
125          * For allocated work, we can clear the done/seen bit right here.
126          * For on-stack work, we need to postpone both the clear and free
127          * to after the RCU grace period, since the stack could be invalidated
128          * as soon as bdi_work_clear() has done the wakeup.
129          */
130         if (!onstack)
131                 bdi_work_clear(work);
132         if (sync_mode == WB_SYNC_NONE || onstack)
133                 call_rcu(&work->rcu_head, bdi_work_free);
134 }
135
136 static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
137 {
138         /*
139          * The caller has retrieved the work arguments from this work,
140          * so drop our reference. If this is the last ref, delete and free it.
141          */
142         if (atomic_dec_and_test(&work->pending)) {
143                 struct backing_dev_info *bdi = wb->bdi;
144
145                 spin_lock(&bdi->wb_lock);
146                 list_del_rcu(&work->list);
147                 spin_unlock(&bdi->wb_lock);
148
149                 wb_work_complete(work);
150         }
151 }
152
153 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
154 {
155         work->seen = bdi->wb_mask;
156         BUG_ON(!work->seen);
157         atomic_set(&work->pending, bdi->wb_cnt);
158         BUG_ON(!bdi->wb_cnt);
159
160         /*
161          * list_add_tail_rcu() contains the necessary barriers to
162          * make sure the above stores are seen before the item is
163          * noticed on the list
164          */
165         spin_lock(&bdi->wb_lock);
166         list_add_tail_rcu(&work->list, &bdi->work_list);
167         spin_unlock(&bdi->wb_lock);
168
169         /*
170          * If the default thread isn't there, make sure we add it. When
171          * it gets created and wakes up, we'll run this work.
172          */
173         if (unlikely(list_empty_careful(&bdi->wb_list)))
174                 wake_up_process(default_backing_dev_info.wb.task);
175         else {
176                 struct bdi_writeback *wb = &bdi->wb;
177
178                 if (wb->task)
179                         wake_up_process(wb->task);
180         }
181 }
182
183 /*
184  * Used for on-stack allocated work items. The caller needs to wait until
185  * the wb threads have acked the work before it's safe to continue.
186  */
187 static void bdi_wait_on_work_clear(struct bdi_work *work)
188 {
189         wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
190                     TASK_UNINTERRUPTIBLE);
191 }
192
193 static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
194                                  struct wb_writeback_args *args)
195 {
196         struct bdi_work *work;
197
198         /*
199          * This is WB_SYNC_NONE writeback, so if allocation fails just
200          * wakeup the thread for old dirty data writeback
201          */
202         work = kmalloc(sizeof(*work), GFP_ATOMIC);
203         if (work) {
204                 bdi_work_init(work, args);
205                 bdi_queue_work(bdi, work);
206         } else {
207                 struct bdi_writeback *wb = &bdi->wb;
208
209                 if (wb->task)
210                         wake_up_process(wb->task);
211         }
212 }
213
214 /**
215  * bdi_sync_writeback - start and wait for writeback
216  * @bdi: the backing device to write from
217  * @sb: write inodes from this super_block
218  *
219  * Description:
220  *   This does WB_SYNC_ALL data integrity writeback and waits for the
221  *   IO to complete. Callers must hold the sb s_umount semaphore for
222  *   reading, to avoid having the super disappear before we are done.
223  */
224 static void bdi_sync_writeback(struct backing_dev_info *bdi,
225                                struct super_block *sb)
226 {
227         struct wb_writeback_args args = {
228                 .sb             = sb,
229                 .sync_mode      = WB_SYNC_ALL,
230                 .nr_pages       = LONG_MAX,
231                 .range_cyclic   = 0,
232         };
233         struct bdi_work work;
234
235         bdi_work_init(&work, &args);
236         work.state |= WS_ONSTACK;
237
238         bdi_queue_work(bdi, &work);
239         bdi_wait_on_work_clear(&work);
240 }
241
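/*
 * Usage note: this on-stack, wait-for-completion pattern is the data
 * integrity path; sync_inodes_sb() near the bottom of this file drives it,
 * roughly:
 *
 *	bdi_sync_writeback(sb->s_bdi, sb);	queue WB_SYNC_ALL work and wait
 *	wait_sb_inodes(sb);			then wait on the data pages
 *
 * The WB_SYNC_NONE path (bdi_start_writeback() below) allocates its work
 * instead, since nobody waits on it.
 */
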
242 /**
243  * bdi_start_writeback - start writeback
244  * @bdi: the backing device to write from
245  * @nr_pages: the number of pages to write
246  *
247  * Description:
248  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
249  *   started when this function returns; we make no guarantees on
250  *   completion. The caller need not hold the sb s_umount semaphore.
251  *
252  */
253 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
254 {
255         struct wb_writeback_args args = {
256                 .sync_mode      = WB_SYNC_NONE,
257                 .nr_pages       = nr_pages,
258                 .range_cyclic   = 1,
259         };
260
261         /*
262          * We treat @nr_pages=0 as the special case to do background writeback,
263          * i.e. to sync pages until the background dirty threshold is reached.
264          */
265         if (!nr_pages) {
266                 args.nr_pages = LONG_MAX;
267                 args.for_background = 1;
268         }
269
270         bdi_alloc_queue_work(bdi, &args);
271 }
272
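/*
 * Illustrative caller (a sketch only; the real call sites live outside this
 * file, e.g. the dirty-page throttling code is assumed to kick background
 * writeback roughly like this):
 *
 *	struct backing_dev_info *bdi = mapping->backing_dev_info;
 *
 *	if (dirty pages exceed the background threshold)
 *		bdi_start_writeback(bdi, 0);	nr_pages == 0 means background mode
 */
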
273 /*
274  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
275  * furthest end of its superblock's dirty-inode list.
276  *
277  * Before stamping the inode's ->dirtied_when, we check to see whether it is
278  * already the most-recently-dirtied inode on the b_dirty list.  If that is
279  * the case then the inode must have been redirtied while it was being written
280  * out and we don't reset its dirtied_when.
281  */
282 static void redirty_tail(struct inode *inode)
283 {
284         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
285
286         if (!list_empty(&wb->b_dirty)) {
287                 struct inode *tail;
288
289                 tail = list_entry(wb->b_dirty.next, struct inode, i_list);
290                 if (time_before(inode->dirtied_when, tail->dirtied_when))
291                         inode->dirtied_when = jiffies;
292         }
293         list_move(&inode->i_list, &wb->b_dirty);
294 }
295
296 /*
297  * requeue inode for re-scanning after bdi->b_io list is exhausted.
298  */
299 static void requeue_io(struct inode *inode)
300 {
301         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
302
303         list_move(&inode->i_list, &wb->b_more_io);
304 }
305
306 static void inode_sync_complete(struct inode *inode)
307 {
308         /*
309          * Prevent speculative execution through spin_unlock(&inode_lock);
310          */
311         smp_mb();
312         wake_up_bit(&inode->i_state, __I_SYNC);
313 }
314
315 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
316 {
317         bool ret = time_after(inode->dirtied_when, t);
318 #ifndef CONFIG_64BIT
319         /*
320          * For inodes being constantly redirtied, dirtied_when can get stuck.
321          * It _appears_ to be in the future, but is actually in distant past.
322          * This test is necessary to prevent such wrapped-around relative times
323          * from permanently stopping the whole bdi writeback.
324          */
325         ret = ret && time_before_eq(inode->dirtied_when, jiffies);
326 #endif
327         return ret;
328 }
329
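/*
 * Worked example of the 32-bit wraparound above (values are illustrative):
 * an inode keeps getting redirtied so dirtied_when is stuck at 0x90000000;
 * much later jiffies has wrapped around to 0x20000000.  Now
 * time_after(0x90000000, 0x20000000) is true, so the inode appears to have
 * been dirtied "in the future" and would never expire.  The extra
 * time_before_eq(dirtied_when, jiffies) check is false here, forcing a
 * return of false and keeping the inode eligible for writeback.
 */
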
330 /*
331  * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
332  */
333 static void move_expired_inodes(struct list_head *delaying_queue,
334                                struct list_head *dispatch_queue,
335                                 unsigned long *older_than_this)
336 {
337         LIST_HEAD(tmp);
338         struct list_head *pos, *node;
339         struct super_block *sb;
340         struct inode *inode;
341
342         while (!list_empty(delaying_queue)) {
343                 inode = list_entry(delaying_queue->prev, struct inode, i_list);
344                 if (older_than_this &&
345                     inode_dirtied_after(inode, *older_than_this))
346                         break;
347                 list_move(&inode->i_list, &tmp);
348         }
349
350         /* Move inodes from one superblock together */
351         while (!list_empty(&tmp)) {
352                 inode = list_entry(tmp.prev, struct inode, i_list);
353                 sb = inode->i_sb;
354                 list_for_each_prev_safe(pos, node, &tmp) {
355                         inode = list_entry(pos, struct inode, i_list);
356                         if (inode->i_sb == sb)
357                                 list_move(&inode->i_list, dispatch_queue);
358                 }
359         }
360 }
361
362 /*
363  * Queue all expired dirty inodes for io, eldest first.
364  */
365 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
366 {
367         list_splice_init(&wb->b_more_io, wb->b_io.prev);
368         move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
369 }
370
371 static int write_inode(struct inode *inode, int sync)
372 {
373         if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
374                 return inode->i_sb->s_op->write_inode(inode, sync);
375         return 0;
376 }
377
378 /*
379  * Wait for writeback on an inode to complete.
380  */
381 static void inode_wait_for_writeback(struct inode *inode)
382 {
383         DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
384         wait_queue_head_t *wqh;
385
386         wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
387         do {
388                 spin_unlock(&inode_lock);
389                 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
390                 spin_lock(&inode_lock);
391         } while (inode->i_state & I_SYNC);
392 }
393
394 /*
395  * Write out an inode's dirty pages.  Called under inode_lock.  Either the
396  * caller has a ref on the inode (either via __iget or via syscall against an fd)
397  * or the inode has I_WILL_FREE set (via generic_forget_inode).
398  *
399  * If `wait' is set, wait on the writeout.
400  *
401  * The whole writeout design is quite complex and fragile.  We want to avoid
402  * starvation of particular inodes when others are being redirtied, prevent
403  * livelocks, etc.
404  *
405  * Called under inode_lock.
406  */
407 static int
408 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
409 {
410         struct address_space *mapping = inode->i_mapping;
411         int wait = wbc->sync_mode == WB_SYNC_ALL;
412         unsigned dirty;
413         int ret;
414
415         if (!atomic_read(&inode->i_count))
416                 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
417         else
418                 WARN_ON(inode->i_state & I_WILL_FREE);
419
420         if (inode->i_state & I_SYNC) {
421                 /*
422                  * If this inode is locked for writeback and we are not doing
423                  * writeback-for-data-integrity, move it to b_more_io so that
424                  * writeback can proceed with the other inodes on s_io.
425                  *
426                  * We'll have another go at writing back this inode when we
427                  * completed a full scan of b_io.
428                  */
429                 if (!wait) {
430                         requeue_io(inode);
431                         return 0;
432                 }
433
434                 /*
435                  * It's a data-integrity sync.  We must wait.
436                  */
437                 inode_wait_for_writeback(inode);
438         }
439
440         BUG_ON(inode->i_state & I_SYNC);
441
442         /* Set I_SYNC, reset I_DIRTY */
443         dirty = inode->i_state & I_DIRTY;
444         inode->i_state |= I_SYNC;
445         inode->i_state &= ~I_DIRTY;
446
447         spin_unlock(&inode_lock);
448
449         ret = do_writepages(mapping, wbc);
450
451         /* Don't write the inode if only I_DIRTY_PAGES was set */
452         if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
453                 int err = write_inode(inode, wait);
454                 if (ret == 0)
455                         ret = err;
456         }
457
458         if (wait) {
459                 int err = filemap_fdatawait(mapping);
460                 if (ret == 0)
461                         ret = err;
462         }
463
464         spin_lock(&inode_lock);
465         inode->i_state &= ~I_SYNC;
466         if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
467                 if (inode->i_state & I_DIRTY) {
468                         /*
469                          * Someone redirtied the inode while we were writing back
470                          * the pages.
471                          */
472                         redirty_tail(inode);
473                 } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
474                         /*
475                          * We didn't write back all the pages.  nfs_writepages()
476                          * sometimes bails out without doing anything. Redirty
477                          * the inode; Move it from b_io onto b_more_io/b_dirty.
478                          */
479                         /*
480                          * akpm: if the caller was the kupdate function we put
481                          * this inode at the head of b_dirty so it gets first
482                          * consideration.  Otherwise, move it to the tail, for
483                          * the reasons described there.  I'm not really sure
484                          * how much sense this makes.  Presumably I had a good
485                          * reasons for doing it this way, and I'd rather not
486                          * how much sense this makes.  Presumably I had good
487                          */
488                         if (wbc->for_kupdate) {
489                                 /*
490                                  * For the kupdate function we move the inode
491                                  * to b_more_io so it will get more writeout as
492                                  * soon as the queue becomes uncongested.
493                                  */
494                                 inode->i_state |= I_DIRTY_PAGES;
495                                 if (wbc->nr_to_write <= 0) {
496                                         /*
497                                          * slice used up: queue for next turn
498                                          */
499                                         requeue_io(inode);
500                                 } else {
501                                         /*
502                                          * somehow blocked: retry later
503                                          */
504                                         redirty_tail(inode);
505                                 }
506                         } else {
507                                 /*
508                                  * Otherwise fully redirty the inode so that
509                                  * other inodes on this superblock will get some
510                                  * writeout.  Otherwise heavy writing to one
511                                  * file would indefinitely suspend writeout of
512                                  * all the other files.
513                                  */
514                                 inode->i_state |= I_DIRTY_PAGES;
515                                 redirty_tail(inode);
516                         }
517                 } else if (atomic_read(&inode->i_count)) {
518                         /*
519                          * The inode is clean, in use
520                          */
521                         list_move(&inode->i_list, &inode_in_use);
522                 } else {
523                         /*
524                          * The inode is clean, unused
525                          */
526                         list_move(&inode->i_list, &inode_unused);
527                 }
528         }
529         inode_sync_complete(inode);
530         return ret;
531 }
532
533 /*
534  * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
535  * before calling writeback. So make sure that we do pin it, so it doesn't
536  * go away while we are writing inodes from it.
537  *
538  * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
539  * 1 if we failed.
540  */
541 static int pin_sb_for_writeback(struct writeback_control *wbc,
542                                    struct inode *inode)
543 {
544         struct super_block *sb = inode->i_sb;
545
546         /*
547          * Caller must already hold the ref for this
548          */
549         if (wbc->sync_mode == WB_SYNC_ALL) {
550                 WARN_ON(!rwsem_is_locked(&sb->s_umount));
551                 return 0;
552         }
553
554         spin_lock(&sb_lock);
555         sb->s_count++;
556         if (down_read_trylock(&sb->s_umount)) {
557                 if (sb->s_root) {
558                         spin_unlock(&sb_lock);
559                         return 0;
560                 }
561                 /*
562                  * umounted, drop rwsem again and fall through to failure
563                  */
564                 up_read(&sb->s_umount);
565         }
566
567         sb->s_count--;
568         spin_unlock(&sb_lock);
569         return 1;
570 }
571
572 static void unpin_sb_for_writeback(struct writeback_control *wbc,
573                                    struct inode *inode)
574 {
575         struct super_block *sb = inode->i_sb;
576
577         if (wbc->sync_mode == WB_SYNC_ALL)
578                 return;
579
580         up_read(&sb->s_umount);
581         put_super(sb);
582 }
583
584 static void writeback_inodes_wb(struct bdi_writeback *wb,
585                                 struct writeback_control *wbc)
586 {
587         struct super_block *sb = wbc->sb;
588         const int is_blkdev_sb = sb_is_blkdev_sb(sb);
589         const unsigned long start = jiffies;    /* livelock avoidance */
590
591         spin_lock(&inode_lock);
592
593         if (!wbc->for_kupdate || list_empty(&wb->b_io))
594                 queue_io(wb, wbc->older_than_this);
595
596         while (!list_empty(&wb->b_io)) {
597                 struct inode *inode = list_entry(wb->b_io.prev,
598                                                 struct inode, i_list);
599                 long pages_skipped;
600
601                 /*
602                  * super block given and doesn't match, skip this inode
603                  */
604                 if (sb && sb != inode->i_sb) {
605                         redirty_tail(inode);
606                         continue;
607                 }
608
609                 if (!bdi_cap_writeback_dirty(wb->bdi)) {
610                         redirty_tail(inode);
611                         if (is_blkdev_sb) {
612                                 /*
613                                  * Dirty memory-backed blockdev: the ramdisk
614                                  * driver does this.  Skip just this inode
615                                  */
616                                 continue;
617                         }
618                         /*
619                          * Dirty memory-backed inode against a filesystem other
620                          * than the kernel-internal bdev filesystem.  Skip the
621                          * entire superblock.
622                          */
623                         break;
624                 }
625
626                 if (inode->i_state & (I_NEW | I_WILL_FREE)) {
627                         requeue_io(inode);
628                         continue;
629                 }
630
631                 if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
632                         wbc->encountered_congestion = 1;
633                         if (!is_blkdev_sb)
634                                 break;          /* Skip a congested fs */
635                         requeue_io(inode);
636                         continue;               /* Skip a congested blockdev */
637                 }
638
639                 /*
640                  * Was this inode dirtied after this writeback pass started?
641                  * If so, stop: this keeps sync from extra work and livelock.
642                  */
643                 if (inode_dirtied_after(inode, start))
644                         break;
645
646                 if (pin_sb_for_writeback(wbc, inode)) {
647                         requeue_io(inode);
648                         continue;
649                 }
650
651                 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
652                 __iget(inode);
653                 pages_skipped = wbc->pages_skipped;
654                 writeback_single_inode(inode, wbc);
655                 unpin_sb_for_writeback(wbc, inode);
656                 if (wbc->pages_skipped != pages_skipped) {
657                         /*
658                          * writeback is not making progress due to locked
659                          * buffers.  Skip this inode for now.
660                          */
661                         redirty_tail(inode);
662                 }
663                 spin_unlock(&inode_lock);
664                 iput(inode);
665                 cond_resched();
666                 spin_lock(&inode_lock);
667                 if (wbc->nr_to_write <= 0) {
668                         wbc->more_io = 1;
669                         break;
670                 }
671                 if (!list_empty(&wb->b_more_io))
672                         wbc->more_io = 1;
673         }
674
675         spin_unlock(&inode_lock);
676         /* Leave any unwritten inodes on b_io */
677 }
678
679 void writeback_inodes_wbc(struct writeback_control *wbc)
680 {
681         struct backing_dev_info *bdi = wbc->bdi;
682
683         writeback_inodes_wb(&bdi->wb, wbc);
684 }
685
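/*
 * Illustrative use of writeback_inodes_wbc() (a sketch; the assumed caller
 * is the dirty-page throttling path in mm, writing from the dirtying task's
 * own bdi):
 *
 *	struct writeback_control wbc = {
 *		.bdi		= bdi,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= write_chunk,
 *	};
 *	writeback_inodes_wbc(&wbc);
 */
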
686 /*
687  * The maximum number of pages to writeout in a single bdi flush/kupdate
688  * operation.  We do this so we don't hold I_SYNC against an inode for
689  * enormous amounts of time, which would block a userspace task which has
690  * been forced to throttle against that inode.  Also, the code reevaluates
691  * the dirty thresholds each time it has written this many pages.
692  */
693 #define MAX_WRITEBACK_PAGES     1024
694
695 static inline bool over_bground_thresh(void)
696 {
697         unsigned long background_thresh, dirty_thresh;
698
699         get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
700
701         return (global_page_state(NR_FILE_DIRTY) +
702                 global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
703 }
704
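/*
 * Rough arithmetic behind the check above, assuming get_dirty_limits()
 * derives background_thresh from dirty_background_ratio as a percentage of
 * dirtyable memory: with dirty_background_ratio = 10 and ~1,000,000
 * dirtyable pages, background_thresh is ~100,000 pages, so background
 * writeback keeps running while NR_FILE_DIRTY + NR_UNSTABLE_NFS is at or
 * above 100,000 pages.
 */
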
705 /*
706  * Explicit flushing or periodic writeback of "old" data.
707  *
708  * Define "old": the first time one of an inode's pages is dirtied, we mark the
709  * dirtying-time in the inode's address_space.  So this periodic writeback code
710  * just walks the superblock inode list, writing back any inodes which are
711  * older than a specific point in time.
712  *
713  * Try to run once per dirty_writeback_interval.  But if a writeback event
714  * takes longer than a dirty_writeback_interval, then leave a
715  * one-second gap.
716  *
717  * older_than_this takes precedence over nr_to_write.  So we'll only write back
718  * all dirty pages if they are all attached to "old" mappings.
719  */
720 static long wb_writeback(struct bdi_writeback *wb,
721                          struct wb_writeback_args *args)
722 {
723         struct writeback_control wbc = {
724                 .bdi                    = wb->bdi,
725                 .sb                     = args->sb,
726                 .sync_mode              = args->sync_mode,
727                 .older_than_this        = NULL,
728                 .for_kupdate            = args->for_kupdate,
729                 .range_cyclic           = args->range_cyclic,
730         };
731         unsigned long oldest_jif;
732         long wrote = 0;
733         struct inode *inode;
734
735         if (wbc.for_kupdate) {
736                 wbc.older_than_this = &oldest_jif;
737                 oldest_jif = jiffies -
738                                 msecs_to_jiffies(dirty_expire_interval * 10);
739         }
740         if (!wbc.range_cyclic) {
741                 wbc.range_start = 0;
742                 wbc.range_end = LLONG_MAX;
743         }
744
745         for (;;) {
746                 /*
747                  * Stop writeback when nr_pages has been consumed
748                  */
749                 if (args->nr_pages <= 0)
750                         break;
751
752                 /*
753                  * For background writeout, stop when we are below the
754                  * background dirty threshold
755                  */
756                 if (args->for_background && !over_bground_thresh())
757                         break;
758
759                 wbc.more_io = 0;
760                 wbc.encountered_congestion = 0;
761                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
762                 wbc.pages_skipped = 0;
763                 writeback_inodes_wb(wb, &wbc);
764                 args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
765                 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
766
767                 /*
768                  * If we consumed everything, see if we have more
769                  */
770                 if (wbc.nr_to_write <= 0)
771                         continue;
772                 /*
773                  * Didn't write everything and we don't have more IO, bail
774                  */
775                 if (!wbc.more_io)
776                         break;
777                 /*
778                  * Did we write something? Try for more
779                  */
780                 if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
781                         continue;
782                 /*
783                  * Nothing written. Wait for some inode to
784                  * become available for writeback. Otherwise
785                  * we'll just busyloop.
786                  */
787                 spin_lock(&inode_lock);
788                 if (!list_empty(&wb->b_more_io))  {
789                         inode = list_entry(wb->b_more_io.prev,
790                                                 struct inode, i_list);
791                         inode_wait_for_writeback(inode);
792                 }
793                 spin_unlock(&inode_lock);
794         }
795
796         return wrote;
797 }
798
799 /*
800  * Return the next bdi_work struct that hasn't been processed by this
801  * wb thread yet. ->seen is initially set for each thread that exists
802  * for this device; when a thread first notices a piece of work it
803  * clears its bit. Depending on writeback type, the thread will notify
804  * completion on either receiving the work (WB_SYNC_NONE) or after
805  * it is done (WB_SYNC_ALL).
806  */
807 static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
808                                            struct bdi_writeback *wb)
809 {
810         struct bdi_work *work, *ret = NULL;
811
812         rcu_read_lock();
813
814         list_for_each_entry_rcu(work, &bdi->work_list, list) {
815                 if (!test_bit(wb->nr, &work->seen))
816                         continue;
817                 clear_bit(wb->nr, &work->seen);
818
819                 ret = work;
820                 break;
821         }
822
823         rcu_read_unlock();
824         return ret;
825 }
826
827 static long wb_check_old_data_flush(struct bdi_writeback *wb)
828 {
829         unsigned long expired;
830         long nr_pages;
831
832         expired = wb->last_old_flush +
833                         msecs_to_jiffies(dirty_writeback_interval * 10);
834         if (time_before(jiffies, expired))
835                 return 0;
836
837         wb->last_old_flush = jiffies;
838         nr_pages = global_page_state(NR_FILE_DIRTY) +
839                         global_page_state(NR_UNSTABLE_NFS) +
840                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
841
842         if (nr_pages) {
843                 struct wb_writeback_args args = {
844                         .nr_pages       = nr_pages,
845                         .sync_mode      = WB_SYNC_NONE,
846                         .for_kupdate    = 1,
847                         .range_cyclic   = 1,
848                 };
849
850                 return wb_writeback(wb, &args);
851         }
852
853         return 0;
854 }
855
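/*
 * Note on units, based on the "* 10" conversions here and in wb_writeback():
 * dirty_writeback_interval and dirty_expire_interval are kept in
 * centiseconds (the /proc/sys/vm/*_centisecs tunables), so e.g. a
 * dirty_writeback_interval of 500 means a kupdate-style flush roughly every
 * 5 seconds.
 */
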
856 /*
857  * Retrieve work items and do the writeback they describe
858  */
859 long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
860 {
861         struct backing_dev_info *bdi = wb->bdi;
862         struct bdi_work *work;
863         long wrote = 0;
864
865         while ((work = get_next_work_item(bdi, wb)) != NULL) {
866                 struct wb_writeback_args args = work->args;
867
868                 /*
869                  * Override sync mode, in case we must wait for completion
870                  */
871                 if (force_wait)
872                         work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
873
874                 /*
875                  * If this isn't a data integrity operation, just notify
876                  * that we have seen this work and we are now starting it.
877                  */
878                 if (args.sync_mode == WB_SYNC_NONE)
879                         wb_clear_pending(wb, work);
880
881                 wrote += wb_writeback(wb, &args);
882
883                 /*
884                  * This is a data integrity writeback, so only do the
885                  * notification when we have completed the work.
886                  */
887                 if (args.sync_mode == WB_SYNC_ALL)
888                         wb_clear_pending(wb, work);
889         }
890
891         /*
892          * Check for periodic writeback, kupdated() style
893          */
894         wrote += wb_check_old_data_flush(wb);
895
896         return wrote;
897 }
898
899 /*
900  * Handle writeback of dirty data for the device backed by this bdi. Also
901  * wakes up periodically and does kupdated style flushing.
902  */
903 int bdi_writeback_task(struct bdi_writeback *wb)
904 {
905         unsigned long last_active = jiffies;
906         unsigned long wait_jiffies = -1UL;
907         long pages_written;
908
909         while (!kthread_should_stop()) {
910                 pages_written = wb_do_writeback(wb, 0);
911
912                 if (pages_written)
913                         last_active = jiffies;
914                 else if (wait_jiffies != -1UL) {
915                         unsigned long max_idle;
916
917                         /*
918                          * Longest period of inactivity that we tolerate. If we
919                          * see dirty data again later, the task will get
920                          * recreated automatically.
921                          */
922                         max_idle = max(5UL * 60 * HZ, wait_jiffies);
923                         if (time_after(jiffies, max_idle + last_active))
924                                 break;
925                 }
926
927                 wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
928                 schedule_timeout_interruptible(wait_jiffies);
929                 try_to_freeze();
930         }
931
932         return 0;
933 }
934
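/*
 * Illustrative kthread wrapper (a sketch with hypothetical naming; the real
 * thread setup is assumed to live in the backing-dev code, not here):
 *
 *	static int example_writeback_thread(void *data)
 *	{
 *		struct bdi_writeback *wb = data;
 *
 *		return bdi_writeback_task(wb);	loops until kthread_should_stop()
 *	}
 */
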
935 /*
936  * Schedule writeback for all backing devices. This does WB_SYNC_NONE
937  * writeback; for integrity writeback, see bdi_sync_writeback().
938  */
939 static void bdi_writeback_all(struct super_block *sb, long nr_pages)
940 {
941         struct wb_writeback_args args = {
942                 .sb             = sb,
943                 .nr_pages       = nr_pages,
944                 .sync_mode      = WB_SYNC_NONE,
945         };
946         struct backing_dev_info *bdi;
947
948         rcu_read_lock();
949
950         list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
951                 if (!bdi_has_dirty_io(bdi))
952                         continue;
953
954                 bdi_alloc_queue_work(bdi, &args);
955         }
956
957         rcu_read_unlock();
958 }
959
960 /*
961  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
962  * the whole world.
963  */
964 void wakeup_flusher_threads(long nr_pages)
965 {
966         if (nr_pages == 0)
967                 nr_pages = global_page_state(NR_FILE_DIRTY) +
968                                 global_page_state(NR_UNSTABLE_NFS);
969         bdi_writeback_all(NULL, nr_pages);
970 }
971
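/*
 * Illustrative caller (a sketch; a sys_sync()-style path is assumed to kick
 * all flusher threads asynchronously before its blocking, per-superblock
 * data integrity pass):
 *
 *	wakeup_flusher_threads(0);	0 means "write back the whole world"
 *	... later, sync_inodes_sb() on each superblock for WB_SYNC_ALL ...
 */
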
972 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
973 {
974         if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
975                 struct dentry *dentry;
976                 const char *name = "?";
977
978                 dentry = d_find_alias(inode);
979                 if (dentry) {
980                         spin_lock(&dentry->d_lock);
981                         name = (const char *) dentry->d_name.name;
982                 }
983                 printk(KERN_DEBUG
984                        "%s(%d): dirtied inode %lu (%s) on %s\n",
985                        current->comm, task_pid_nr(current), inode->i_ino,
986                        name, inode->i_sb->s_id);
987                 if (dentry) {
988                         spin_unlock(&dentry->d_lock);
989                         dput(dentry);
990                 }
991         }
992 }
993
994 /**
995  *      __mark_inode_dirty -    internal function
996  *      @inode: inode to mark
997  *      @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
998  *      Mark an inode as dirty. Callers should use mark_inode_dirty or
999  *      mark_inode_dirty_sync.
1000  *
1001  * Put the inode on the super block's dirty list.
1002  *
1003  * CAREFUL! We mark it dirty unconditionally, but move it onto the
1004  * dirty list only if it is hashed or if it refers to a blockdev.
1005  * If it was not hashed, it will never be added to the dirty list
1006  * even if it is later hashed, as it will have been marked dirty already.
1007  *
1008  * In short, make sure you hash any inodes _before_ you start marking
1009  * them dirty.
1010  *
1011  * This function *must* be atomic for the I_DIRTY_PAGES case -
1012  * set_page_dirty() is called under spinlock in several places.
1013  *
1014  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
1015  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
1016  * the kernel-internal blockdev inode represents the dirtying time of the
1017  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
1018  * page->mapping->host, so the page-dirtying time is recorded in the internal
1019  * blockdev inode.
1020  */
1021 void __mark_inode_dirty(struct inode *inode, int flags)
1022 {
1023         struct super_block *sb = inode->i_sb;
1024
1025         /*
1026          * Don't do this for I_DIRTY_PAGES - that doesn't actually
1027          * dirty the inode itself
1028          */
1029         if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
1030                 if (sb->s_op->dirty_inode)
1031                         sb->s_op->dirty_inode(inode);
1032         }
1033
1034         /*
1035          * make sure that changes are seen by all cpus before we test i_state
1036          * -- mikulas
1037          */
1038         smp_mb();
1039
1040         /* avoid the locking if we can */
1041         if ((inode->i_state & flags) == flags)
1042                 return;
1043
1044         if (unlikely(block_dump))
1045                 block_dump___mark_inode_dirty(inode);
1046
1047         spin_lock(&inode_lock);
1048         if ((inode->i_state & flags) != flags) {
1049                 const int was_dirty = inode->i_state & I_DIRTY;
1050
1051                 inode->i_state |= flags;
1052
1053                 /*
1054                  * If the inode is being synced, just update its dirty state.
1055                  * The unlocker will place the inode on the appropriate
1056                  * superblock list, based upon its state.
1057                  */
1058                 if (inode->i_state & I_SYNC)
1059                         goto out;
1060
1061                 /*
1062                  * Only add valid (hashed) inodes to the superblock's
1063                  * dirty list.  Add blockdev inodes as well.
1064                  */
1065                 if (!S_ISBLK(inode->i_mode)) {
1066                         if (hlist_unhashed(&inode->i_hash))
1067                                 goto out;
1068                 }
1069                 if (inode->i_state & (I_FREEING|I_CLEAR))
1070                         goto out;
1071
1072                 /*
1073                  * If the inode was already on b_dirty/b_io/b_more_io, don't
1074                  * reposition it (that would break b_dirty time-ordering).
1075                  */
1076                 if (!was_dirty) {
1077                         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
1078                         struct backing_dev_info *bdi = wb->bdi;
1079
1080                         if (bdi_cap_writeback_dirty(bdi) &&
1081                             !test_bit(BDI_registered, &bdi->state)) {
1082                                 WARN_ON(1);
1083                                 printk(KERN_ERR "bdi-%s not registered\n",
1084                                                                 bdi->name);
1085                         }
1086
1087                         inode->dirtied_when = jiffies;
1088                         list_move(&inode->i_list, &wb->b_dirty);
1089                 }
1090         }
1091 out:
1092         spin_unlock(&inode_lock);
1093 }
1094 EXPORT_SYMBOL(__mark_inode_dirty);
1095
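/*
 * For reference, the usual entry points are thin wrappers around
 * __mark_inode_dirty() (in linux/fs.h; shown here as a sketch):
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */
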
1096 /*
1097  * Write out a superblock's list of dirty inodes.  A wait will be performed
1098  * upon no inodes, all inodes or the final one, depending upon sync_mode.
1099  *
1100  * If older_than_this is non-NULL, then only write out inodes which
1101  * had their first dirtying at a time earlier than *older_than_this.
1102  *
1103  * If `bdi' is non-zero then we're being asked to write back a specific queue.
1104  * This function assumes that the blockdev superblock's inodes are backed by
1105  * a variety of queues, so all inodes are searched.  For other superblocks,
1106  * assume that all inodes are backed by the same queue.
1107  *
1108  * The inodes to be written are parked on bdi->b_io.  They are moved back onto
1109  * bdi->b_dirty as they are selected for writing.  This way, none can be missed
1110  * on the writer throttling path, and we get decent balancing between many
1111  * throttled threads: we don't want them all piling up on inode_sync_wait.
1112  */
1113 static void wait_sb_inodes(struct super_block *sb)
1114 {
1115         struct inode *inode, *old_inode = NULL;
1116
1117         /*
1118          * We need to be protected against the filesystem going from
1119          * r/o to r/w or vice versa.
1120          */
1121         WARN_ON(!rwsem_is_locked(&sb->s_umount));
1122
1123         spin_lock(&inode_lock);
1124
1125         /*
1126          * Data integrity sync. Must wait for all pages under writeback,
1127          * because there may have been pages dirtied before our sync
1128          * call whose writeout had already started before we got to them.
1129          * In that case the inode may not be on the dirty list, but
1130          * we still have to wait for that writeout.
1131          */
1132         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1133                 struct address_space *mapping;
1134
1135                 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
1136                         continue;
1137                 mapping = inode->i_mapping;
1138                 if (mapping->nrpages == 0)
1139                         continue;
1140                 __iget(inode);
1141                 spin_unlock(&inode_lock);
1142                 /*
1143                  * We hold a reference to 'inode' so it couldn't have
1144                  * been removed from s_inodes list while we dropped the
1145                  * inode_lock.  We cannot iput the inode now as we can
1146                  * be holding the last reference and we cannot iput it
1147                  * under inode_lock. So we keep the reference and iput
1148                  * it later.
1149                  */
1150                 iput(old_inode);
1151                 old_inode = inode;
1152
1153                 filemap_fdatawait(mapping);
1154
1155                 cond_resched();
1156
1157                 spin_lock(&inode_lock);
1158         }
1159         spin_unlock(&inode_lock);
1160         iput(old_inode);
1161 }
1162
1163 /**
1164  * writeback_inodes_sb  -       writeback dirty inodes from given super_block
1165  * @sb: the superblock
1166  *
1167  * Start writeback on some inodes on this super_block. No guarantees are made
1168  * on how many (if any) will be written, and this function does not wait
1169  * for IO completion of submitted IO. The number of pages submitted is
1170  * returned.
1171  */
1172 void writeback_inodes_sb(struct super_block *sb)
1173 {
1174         unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1175         unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1176         long nr_to_write;
1177
1178         nr_to_write = nr_dirty + nr_unstable +
1179                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
1180
1181         bdi_writeback_all(sb, nr_to_write);
1182 }
1183 EXPORT_SYMBOL(writeback_inodes_sb);
1184
1185 /**
1186  * sync_inodes_sb       -       sync sb inode pages
1187  * @sb: the superblock
1188  *
1189  * This function writes and waits on any dirty inode belonging to this
1190  * super_block.
1191  */
1192 void sync_inodes_sb(struct super_block *sb)
1193 {
1194         bdi_sync_writeback(sb->s_bdi, sb);
1195         wait_sb_inodes(sb);
1196 }
1197 EXPORT_SYMBOL(sync_inodes_sb);
1198
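/*
 * Illustrative sync-path use of the two superblock entry points above (a
 * sketch; the fs/sync.c logic is assumed to look roughly like this):
 *
 *	if (wait)
 *		sync_inodes_sb(sb);		WB_SYNC_ALL: write and wait
 *	else
 *		writeback_inodes_sb(sb);	WB_SYNC_NONE: just start the IO
 */
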
1199 /**
1200  * write_inode_now      -       write an inode to disk
1201  * @inode: inode to write to disk
1202  * @sync: whether the write should be synchronous or not
1203  *
1204  * This function commits an inode to disk immediately if it is dirty. This is
1205  * primarily needed by knfsd.
1206  *
1207  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1208  */
1209 int write_inode_now(struct inode *inode, int sync)
1210 {
1211         int ret;
1212         struct writeback_control wbc = {
1213                 .nr_to_write = LONG_MAX,
1214                 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1215                 .range_start = 0,
1216                 .range_end = LLONG_MAX,
1217         };
1218
1219         if (!mapping_cap_writeback_dirty(inode->i_mapping))
1220                 wbc.nr_to_write = 0;
1221
1222         might_sleep();
1223         spin_lock(&inode_lock);
1224         ret = writeback_single_inode(inode, &wbc);
1225         spin_unlock(&inode_lock);
1226         if (sync)
1227                 inode_sync_wait(inode);
1228         return ret;
1229 }
1230 EXPORT_SYMBOL(write_inode_now);
1231
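/*
 * Illustrative use of write_inode_now() (a sketch; a knfsd-style caller that
 * needs the inode safely on disk before replying might do):
 *
 *	err = write_inode_now(inode, 1);	sync == 1 waits on the writeout
 *	if (err)
 *		return err;
 */
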
1232 /**
1233  * sync_inode - write an inode and its pages to disk.
1234  * @inode: the inode to sync
1235  * @wbc: controls the writeback mode
1236  *
1237  * sync_inode() will write an inode and its pages to disk.  It will also
1238  * correctly update the inode on its superblock's dirty inode lists and will
1239  * update inode->i_state.
1240  *
1241  * The caller must have a ref on the inode.
1242  */
1243 int sync_inode(struct inode *inode, struct writeback_control *wbc)
1244 {
1245         int ret;
1246
1247         spin_lock(&inode_lock);
1248         ret = writeback_single_inode(inode, wbc);
1249         spin_unlock(&inode_lock);
1250         return ret;
1251 }
1252 EXPORT_SYMBOL(sync_inode);