writeback: move dirty inodes from super_block to backing_dev_info
[linux-2.6-block.git] fs/fs-writeback.c
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. ie: data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own, which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}

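/*
 * Illustrative sketch: pdflush-style callers are expected to bracket a
 * writeback pass with the two helpers above, as generic_sync_bdi_inodes()
 * below does:
 *
 *	if (current_is_pdflush() && !writeback_acquire(bdi))
 *		return;				// another pdflush owns this bdi
 *	... write back inodes queued on bdi->b_io ...
 *	if (current_is_pdflush())
 *		writeback_release(bdi);		// let the next pdflush thread in
 */
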
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on its backing device's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the dirty list.
		 * Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list,
				  &inode_to_bdi(inode)->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

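/*
 * Illustrative sketch: filesystems normally reach __mark_inode_dirty()
 * through the wrappers in include/linux/fs.h rather than calling it
 * directly, e.g. after a timestamp update:
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	mark_inode_dirty_sync(inode);	// __mark_inode_dirty(inode, I_DIRTY_SYNC)
 *
 * whereas set_page_dirty() ends up calling
 * __mark_inode_dirty(mapping->host, I_DIRTY_PAGES) under a spinlock, which
 * is why the I_DIRTY_PAGES path above must not sleep.
 */
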
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its bdi's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (!list_empty(&bdi->b_dirty)) {
		struct inode *tail;

		tail = list_entry(bdi->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &bdi->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode_to_bdi(inode)->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole pdflush
	 * writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

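/*
 * Worked example: on 32-bit with HZ=1000, jiffies wraps every 2^32 / 1000
 * seconds, i.e. about 49.7 days, and time_after() can only compare stamps
 * that are less than half a wrap (~24.8 days) apart.  An inode whose
 * dirtied_when is older than that can therefore appear to have been dirtied
 * "in the future"; the extra time_before_eq(dirtied_when, jiffies) check
 * above rejects such wrapped stamps so kupdate-style writeback is not
 * stalled forever.
 */
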
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct backing_dev_info *bdi,
		     unsigned long *older_than_this)
{
	list_splice_init(&bdi->b_more_io, bdi->b_io.prev);
	move_expired_inodes(&bdi->b_dirty, &bdi->b_io, older_than_this);
}

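/*
 * Illustrative sketch: the kupdate pass in mm/page-writeback.c builds the
 * cutoff that queue_io() receives roughly as
 *
 *	unsigned long oldest_jif = jiffies -
 *			msecs_to_jiffies(dirty_expire_interval * 10);
 *	wbc.older_than_this = &oldest_jif;
 *
 * so only inodes dirtied before that point move from b_dirty to b_io, while
 * b_more_io is always spliced back for another pass.
 */
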
static int sb_on_inode_list(struct super_block *sb, struct list_head *list)
{
	struct inode *inode;
	int ret = 0;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, list, i_list) {
		if (inode->i_sb == sb) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&inode_lock);
	return ret;
}

int sb_has_dirty_inodes(struct super_block *sb)
{
	struct backing_dev_info *bdi;
	int ret = 0;

	/*
	 * This is REALLY expensive right now, but it'll go away
	 * when the bdi writeback is introduced
	 */
	mutex_lock(&bdi_lock);
	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		if (sb_on_inode_list(sb, &bdi->b_dirty) ||
		    sb_on_inode_list(sb, &bdi->b_io) ||
		    sb_on_inode_list(sb, &bdi->b_more_io)) {
			ret = 1;
			break;
		}
	}
	mutex_unlock(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

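/*
 * Usage note: writeback_inodes() below uses this as a filter before taking
 * s_umount on each superblock, roughly:
 *
 *	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
 *		if (!sb_has_dirty_inodes(sb))
 *			continue;	// nothing queued for this sb, skip it
 *		... bump s_count, try s_umount, generic_sync_sb_inodes() ...
 *	}
 */
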
/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
				    struct writeback_control *wbc,
				    struct super_block *sb)
{
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&bdi->b_io))
		queue_io(bdi, wbc->older_than_this);

	while (!list_empty(&bdi->b_io)) {
		struct inode *inode = list_entry(bdi->b_io.prev,
						 struct inode, i_list);
		long pages_skipped;

		/*
		 * superblock given and it doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!is_blkdev_sb)
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/*
		 * Was this inode dirtied after generic_sync_bdi_inodes was
		 * called?  This keeps sync from doing extra work and prevents
		 * livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&bdi->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to write back a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void generic_sync_sb_inodes(struct super_block *sb,
				   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi;

	if (!wbc->bdi) {
		mutex_lock(&bdi_lock);
		list_for_each_entry(bdi, &bdi_list, bdi_list)
			generic_sync_bdi_inodes(bdi, wbc, sb);
		mutex_unlock(&bdi_lock);
	} else
		generic_sync_bdi_inodes(wbc->bdi, wbc, sb);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		struct inode *inode, *old_inode = NULL;

		spin_lock(&inode_lock);

		/*
		 * Data integrity sync.  Must wait for all pages under writeback,
		 * because there may have been pages dirtied before our sync
		 * call, but which had writeout started before we wrote them
		 * out.  In which case, the inode may not be on the dirty list,
		 * but we still have to wait for that writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state &
					(I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from the s_inodes list while we dropped
			 * the inode_lock.  We cannot iput the inode now as we
			 * can be holding the last reference and we cannot iput
			 * it under inode_lock.  So we keep the reference and
			 * iput it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	}
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to superblock here.  If it has non-empty
 * ->b_dirty it hasn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until the ->b_dirty/b_io/b_more_io lists are all
 * empty.  Since writeback_single_inode() regains inode_lock before it finally
 * moves the inode off the writeback lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * generic_sync_sb_inodes() will seek out the blockdev which matches `bdi'.
 * Maybe not super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					generic_sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block.  No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.  The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.  The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX;	/* doesn't actually matter */

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);

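/*
 * Illustrative sketch: callers such as the sys_sync()/__sync_filesystem path
 * are expected to combine the two helpers above as a non-waiting pass
 * followed by a waiting pass, roughly:
 *
 *	writeback_inodes_sb(sb);	// WB_SYNC_NONE: start I/O, don't wait
 *	sync_inodes_sb(sb);		// WB_SYNC_ALL: write and wait on everything
 *
 * Both return the number of pages submitted or synced, which callers usually
 * ignore.
 */
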
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

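/*
 * Illustrative sketch: a typical caller such as knfsd flushes a single inode
 * synchronously before relying on its on-disk state (error handling here is
 * hypothetical):
 *
 *	err = write_inode_now(inode, 1);	// write and wait
 *	if (err)
 *		...;				// writeback failed
 *
 * With sync == 0 the writeout is started (WB_SYNC_NONE) but not waited for.
 */
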
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its bdi's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

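/*
 * Illustrative sketch: unlike write_inode_now(), sync_inode() lets the
 * caller supply the writeback_control, e.g. to bound how much data is
 * written in one go (the 64-page limit below is purely hypothetical):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= 64,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	err = sync_inode(inode, &wbc);
 */
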
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA: i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE: the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
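
/*
 * Illustrative sketch: a file_write path for an O_SYNC descriptor would
 * flush what it just wrote roughly like this:
 *
 *	if (((file->f_flags & O_SYNC) || IS_SYNC(inode)) && ret > 0) {
 *		err = generic_osync_inode(inode, mapping,
 *					  OSYNC_METADATA | OSYNC_DATA);
 *		if (err)
 *			ret = err;
 *	}
 *
 * OSYNC_INODE would be added when the inode itself must also be written out.
 */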