4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
55 EXPORT_SYMBOL(init_buffer);
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
72 void __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
81 return wait_on_bit_lock_async(&bh->b_state, BH_Lock, sync_buffer,
82 TASK_UNINTERRUPTIBLE, wait);
84 EXPORT_SYMBOL(__lock_buffer_async);
86 void unlock_buffer(struct buffer_head *bh)
88 clear_bit_unlock(BH_Lock, &bh->b_state);
89 smp_mb__after_clear_bit();
90 wake_up_bit(&bh->b_state, BH_Lock);
92 EXPORT_SYMBOL(unlock_buffer);
95 * Block until a buffer comes unlocked. This doesn't stop it
96 * from becoming locked again - you have to lock it yourself
97 * if you want to preserve its state.
99 void __wait_on_buffer(struct buffer_head * bh)
101 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
103 EXPORT_SYMBOL(__wait_on_buffer);
105 int __wait_on_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
107 return wait_on_bit_async(&bh->b_state, BH_Lock, sync_buffer,
108 TASK_UNINTERRUPTIBLE, wait);
110 EXPORT_SYMBOL(__wait_on_buffer_async);
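
/*
 * Illustrative sketch, not part of this file: the usual pattern for callers
 * of the locking primitives above.  Waiting alone does not keep the buffer
 * unlocked, so lock it yourself, re-check the state, and only then issue
 * I/O - this is essentially what __bread_slow() does further down.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_read_locked(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		/* someone else brought it uptodate while we slept */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);				/* end_buffer_read_sync drops this ref */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks the buffer on completion */
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}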
113 __clear_page_buffers(struct page *page)
115 ClearPagePrivate(page);
116 set_page_private(page, 0);
117 page_cache_release(page);
121 static int quiet_error(struct buffer_head *bh)
123 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
129 static void buffer_io_error(struct buffer_head *bh)
131 char b[BDEVNAME_SIZE];
132 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
133 bdevname(bh->b_bdev, b),
134 (unsigned long long)bh->b_blocknr);
138 * End-of-IO handler helper function which does not touch the bh after
140 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
142 * hashing after unlocking the buffer, so it doesn't actually touch the bh
145 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
148 set_buffer_uptodate(bh);
150 /* This happens, due to failed READA attempts. */
151 clear_buffer_uptodate(bh);
157 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
158 * unlock the buffer. This is what ll_rw_block uses too.
160 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
162 __end_buffer_read_notouch(bh, uptodate);
165 EXPORT_SYMBOL(end_buffer_read_sync);
167 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
169 char b[BDEVNAME_SIZE];
172 set_buffer_uptodate(bh);
174 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
176 printk(KERN_WARNING "lost page write due to "
178 bdevname(bh->b_bdev, b));
180 set_buffer_write_io_error(bh);
181 clear_buffer_uptodate(bh);
186 EXPORT_SYMBOL(end_buffer_write_sync);
189 * Various filesystems appear to want __find_get_block to be non-blocking.
190 * But it's the page lock which protects the buffers. To get around this,
191 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * Hack idea: for the blockdev mapping, private_lock contention
195 * may be quite high. This code could TryLock the page, and if that
196 * succeeds, there is no need to take private_lock. (But if
197 * private_lock is contended then so is mapping->tree_lock).
199 static struct buffer_head *
200 __find_get_block_slow(struct block_device *bdev, sector_t block)
202 struct inode *bd_inode = bdev->bd_inode;
203 struct address_space *bd_mapping = bd_inode->i_mapping;
204 struct buffer_head *ret = NULL;
206 struct buffer_head *bh;
207 struct buffer_head *head;
211 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
212 page = find_get_page(bd_mapping, index);
216 spin_lock(&bd_mapping->private_lock);
217 if (!page_has_buffers(page))
219 head = page_buffers(page);
222 if (!buffer_mapped(bh))
224 else if (bh->b_blocknr == block) {
229 bh = bh->b_this_page;
230 } while (bh != head);
232 /* we might be here because some of the buffers on this page are
233 * not mapped. This is due to various races between
234 * file io on the block device and getblk. It gets dealt with
235 * elsewhere, don't buffer_error if we had some unmapped buffers
238 printk("__find_get_block_slow() failed. "
239 "block=%llu, b_blocknr=%llu\n",
240 (unsigned long long)block,
241 (unsigned long long)bh->b_blocknr);
242 printk("b_state=0x%08lx, b_size=%zu\n",
243 bh->b_state, bh->b_size);
244 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
247 spin_unlock(&bd_mapping->private_lock);
248 page_cache_release(page);
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even though
   there is still dirty data not synced to disk (due to a bug in the device
   driver or to an error by the user), by not destroying the dirty buffers we
   could generate corruption also on the next media inserted; thus a parameter
   is necessary to handle this case in the safest way possible (trying
   not to corrupt the newly inserted disk with the data belonging to
   the old, now-corrupted disk). Also for the ramdisk, the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive, so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update; the update == 2
   pass does the actual I/O. */
285 void invalidate_bdev(struct block_device *bdev)
287 struct address_space *mapping = bdev->bd_inode->i_mapping;
289 if (mapping->nrpages == 0)
292 invalidate_bh_lrus();
293 invalidate_mapping_pages(mapping, 0, -1);
295 EXPORT_SYMBOL(invalidate_bdev);
298 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
300 static void free_more_memory(void)
305 wakeup_flusher_threads(1024);
308 for_each_online_node(nid) {
309 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
310 gfp_zone(GFP_NOFS), NULL,
313 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
319 * I/O completion handler for block_read_full_page() - pages
320 * which come unlocked at the end of I/O.
322 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
325 struct buffer_head *first;
326 struct buffer_head *tmp;
328 int page_uptodate = 1;
330 BUG_ON(!buffer_async_read(bh));
334 set_buffer_uptodate(bh);
336 clear_buffer_uptodate(bh);
337 if (!quiet_error(bh))
343 * Be _very_ careful from here on. Bad things can happen if
344 * two buffer heads end IO at almost the same time and both
345 * decide that the page is now completely done.
347 first = page_buffers(page);
348 local_irq_save(flags);
349 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
350 clear_buffer_async_read(bh);
354 if (!buffer_uptodate(tmp))
356 if (buffer_async_read(tmp)) {
357 BUG_ON(!buffer_locked(tmp));
360 tmp = tmp->b_this_page;
362 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
363 local_irq_restore(flags);
366 * If none of the buffers had errors and they are all
367 * uptodate then we can set the page uptodate.
369 if (page_uptodate && !PageError(page))
370 SetPageUptodate(page);
375 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
376 local_irq_restore(flags);
381 * Completion handler for block_write_full_page() - pages which are unlocked
382 * during I/O, and which have PageWriteback cleared upon I/O completion.
384 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
386 char b[BDEVNAME_SIZE];
388 struct buffer_head *first;
389 struct buffer_head *tmp;
392 BUG_ON(!buffer_async_write(bh));
396 set_buffer_uptodate(bh);
398 if (!quiet_error(bh)) {
400 printk(KERN_WARNING "lost page write due to "
402 bdevname(bh->b_bdev, b));
404 set_bit(AS_EIO, &page->mapping->flags);
405 set_buffer_write_io_error(bh);
406 clear_buffer_uptodate(bh);
410 first = page_buffers(page);
411 local_irq_save(flags);
412 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
414 clear_buffer_async_write(bh);
416 tmp = bh->b_this_page;
418 if (buffer_async_write(tmp)) {
419 BUG_ON(!buffer_locked(tmp));
422 tmp = tmp->b_this_page;
424 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
425 local_irq_restore(flags);
426 end_page_writeback(page);
430 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
431 local_irq_restore(flags);
434 EXPORT_SYMBOL(end_buffer_async_write);
 * If a page's buffers are under async read-in (end_buffer_async_read
438 * completion) then there is a possibility that another thread of
439 * control could lock one of the buffers after it has completed
440 * but while some of the other buffers have not completed. This
441 * locked buffer would confuse end_buffer_async_read() into not unlocking
442 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
443 * that this buffer is not under async I/O.
445 * The page comes unlocked when it has no locked buffer_async buffers
448 * PageLocked prevents anyone starting new async I/O reads any of
451 * PageWriteback is used to prevent simultaneous writeout of the same
454 * PageLocked prevents anyone from starting writeback of a page which is
455 * under read I/O (PageWriteback is only ever set against a locked page).
457 static void mark_buffer_async_read(struct buffer_head *bh)
459 bh->b_end_io = end_buffer_async_read;
460 set_buffer_async_read(bh);
463 static void mark_buffer_async_write_endio(struct buffer_head *bh,
464 bh_end_io_t *handler)
466 bh->b_end_io = handler;
467 set_buffer_async_write(bh);
470 void mark_buffer_async_write(struct buffer_head *bh)
472 mark_buffer_async_write_endio(bh, end_buffer_async_write);
474 EXPORT_SYMBOL(mark_buffer_async_write);
478 * fs/buffer.c contains helper functions for buffer-backed address space's
479 * fsync functions. A common requirement for buffer-based filesystems is
480 * that certain data from the backing blockdev needs to be written out for
481 * a successful fsync(). For example, ext2 indirect blocks need to be
482 * written back and waited upon before fsync() returns.
 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
485 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
486 * management of a list of dependent buffers at ->i_mapping->private_list.
488 * Locking is a little subtle: try_to_free_buffers() will remove buffers
489 * from their controlling inode's queue when they are being freed. But
490 * try_to_free_buffers() will be operating against the *blockdev* mapping
491 * at the time, not against the S_ISREG file which depends on those buffers.
492 * So the locking for private_list is via the private_lock in the address_space
493 * which backs the buffers. Which is different from the address_space
494 * against which the buffers are listed. So for a particular address_space,
495 * mapping->private_lock does *not* protect mapping->private_list! In fact,
496 * mapping->private_list will always be protected by the backing blockdev's
499 * Which introduces a requirement: all buffers on an address_space's
500 * ->private_list must be from the same address_space: the blockdev's.
502 * address_spaces which do not place buffers at ->private_list via these
503 * utility functions are free to use private_lock and private_list for
504 * whatever they want. The only requirement is that list_empty(private_list)
505 * be true at clear_inode() time.
507 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
508 * filesystems should do that. invalidate_inode_buffers() should just go
509 * BUG_ON(!list_empty).
511 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
512 * take an address_space, not an inode. And it should be called
513 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
516 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
517 * list if it is already on a list. Because if the buffer is on a list,
518 * it *must* already be on the right one. If not, the filesystem is being
519 * silly. This will save a ton of locking. But first we have to ensure
520 * that buffers are taken *off* the old inode's list when they are freed
521 * (presumably in truncate). That requires careful auditing of all
522 * filesystems (do it inside bforget()). It could also be done by bringing
527 * The buffer's backing address_space's private_lock must be held
529 static void __remove_assoc_queue(struct buffer_head *bh)
531 list_del_init(&bh->b_assoc_buffers);
532 WARN_ON(!bh->b_assoc_map);
533 if (buffer_write_io_error(bh))
534 set_bit(AS_EIO, &bh->b_assoc_map->flags);
535 bh->b_assoc_map = NULL;
538 int inode_has_buffers(struct inode *inode)
540 return !list_empty(&inode->i_data.private_list);
544 * osync is designed to support O_SYNC io. It waits synchronously for
545 * all already-submitted IO to complete, but does not queue any new
546 * writes to the disk.
548 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
549 * you dirty the buffers, and then use osync_inode_buffers to wait for
550 * completion. Any other dirty buffers which are not yet queued for
551 * write will not be flushed to disk by the osync.
553 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
555 struct buffer_head *bh;
561 list_for_each_prev(p, list) {
563 if (buffer_locked(bh)) {
567 if (!buffer_uptodate(bh))
578 static void do_thaw_all(struct work_struct *work)
580 struct super_block *sb;
581 char b[BDEVNAME_SIZE];
585 list_for_each_entry(sb, &super_blocks, s_list) {
587 spin_unlock(&sb_lock);
588 down_read(&sb->s_umount);
589 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
590 printk(KERN_WARNING "Emergency Thaw on %s\n",
591 bdevname(sb->s_bdev, b));
592 up_read(&sb->s_umount);
594 if (__put_super_and_need_restart(sb))
597 spin_unlock(&sb_lock);
599 printk(KERN_WARNING "Emergency Thaw complete\n");
603 * emergency_thaw_all -- forcibly thaw every frozen filesystem
605 * Used for emergency unfreeze of all filesystems via SysRq
607 void emergency_thaw_all(void)
609 struct work_struct *work;
611 work = kmalloc(sizeof(*work), GFP_ATOMIC);
613 INIT_WORK(work, do_thaw_all);
619 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
620 * @mapping: the mapping which wants those buffers written
622 * Starts I/O against the buffers at mapping->private_list, and waits upon
625 * Basically, this is a convenience function for fsync().
626 * @mapping is a file or directory which needs those buffers to be written for
627 * a successful fsync().
629 int sync_mapping_buffers(struct address_space *mapping)
631 struct address_space *buffer_mapping = mapping->assoc_mapping;
633 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
636 return fsync_buffers_list(&buffer_mapping->private_lock,
637 &mapping->private_list);
639 EXPORT_SYMBOL(sync_mapping_buffers);
642 * Called when we've recently written block `bblock', and it is known that
643 * `bblock' was for a buffer_boundary() buffer. This means that the block at
644 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
645 * dirty, schedule it for IO. So that indirects merge nicely with their data.
647 void write_boundary_block(struct block_device *bdev,
648 sector_t bblock, unsigned blocksize)
650 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
652 if (buffer_dirty(bh))
653 ll_rw_block(WRITE, 1, &bh);
658 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
660 struct address_space *mapping = inode->i_mapping;
661 struct address_space *buffer_mapping = bh->b_page->mapping;
663 mark_buffer_dirty(bh);
664 if (!mapping->assoc_mapping) {
665 mapping->assoc_mapping = buffer_mapping;
667 BUG_ON(mapping->assoc_mapping != buffer_mapping);
669 if (!bh->b_assoc_map) {
670 spin_lock(&buffer_mapping->private_lock);
671 list_move_tail(&bh->b_assoc_buffers,
672 &mapping->private_list);
673 bh->b_assoc_map = mapping;
674 spin_unlock(&buffer_mapping->private_lock);
677 EXPORT_SYMBOL(mark_buffer_dirty_inode);
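
/*
 * Illustrative sketch, not part of this file: how a buffer-backed filesystem
 * typically combines mark_buffer_dirty_inode() with sync_mapping_buffers()
 * above.  Metadata buffers dirtied on behalf of a regular file land on that
 * file's ->private_list, and the fsync path then writes and waits on exactly
 * those buffers.  "examplefs" and the helper name are hypothetical.
 */
static int __maybe_unused examplefs_dirty_and_sync(struct inode *inode,
						   struct buffer_head *meta_bh)
{
	/* e.g. after updating an indirect block for this file */
	mark_buffer_dirty_inode(meta_bh, inode);

	/* later, from the filesystem's ->fsync() */
	return sync_mapping_buffers(inode->i_mapping);
}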
680 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
683 * If warn is true, then emit a warning if the page is not uptodate and has
684 * not been truncated.
686 static void __set_page_dirty(struct page *page,
687 struct address_space *mapping, int warn)
689 spin_lock_irq(&mapping->tree_lock);
690 if (page->mapping) { /* Race with truncate? */
691 WARN_ON_ONCE(warn && !PageUptodate(page));
692 account_page_dirtied(page, mapping);
693 radix_tree_tag_set(&mapping->page_tree,
694 page_index(page), PAGECACHE_TAG_DIRTY);
696 spin_unlock_irq(&mapping->tree_lock);
697 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
701 * Add a page to the dirty page list.
703 * It is a sad fact of life that this function is called from several places
704 * deeply under spinlocking. It may not sleep.
706 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers. If the page does
708 * not have buffers then when they are later attached they will all be set
711 * The buffers are dirtied before the page is dirtied. There's a small race
712 * window in which a writepage caller may see the page cleanness but not the
713 * buffer dirtiness. That's fine. If this code were to set the page dirty
714 * before the buffers, a concurrent writepage caller could clear the page dirty
715 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
716 * page on the dirty page list.
718 * We use private_lock to lock against try_to_free_buffers while using the
719 * page's buffer list. Also use this to protect against clean buffers being
720 * added to the page after it was set dirty.
722 * FIXME: may need to call ->reservepage here as well. That's rather up to the
723 * address_space though.
725 int __set_page_dirty_buffers(struct page *page)
728 struct address_space *mapping = page_mapping(page);
730 if (unlikely(!mapping))
731 return !TestSetPageDirty(page);
733 spin_lock(&mapping->private_lock);
734 if (page_has_buffers(page)) {
735 struct buffer_head *head = page_buffers(page);
736 struct buffer_head *bh = head;
739 set_buffer_dirty(bh);
740 bh = bh->b_this_page;
741 } while (bh != head);
743 newly_dirty = !TestSetPageDirty(page);
744 spin_unlock(&mapping->private_lock);
747 __set_page_dirty(page, mapping, 1);
750 EXPORT_SYMBOL(__set_page_dirty_buffers);
753 * Write out and wait upon a list of buffers.
755 * We have conflicting pressures: we want to make sure that all
756 * initially dirty buffers get waited on, but that any subsequently
757 * dirtied buffers don't. After all, we don't want fsync to last
758 * forever if somebody is actively writing to the file.
760 * Do this in two main stages: first we copy dirty buffers to a
761 * temporary inode list, queueing the writes as we go. Then we clean
762 * up, waiting for those writes to complete.
764 * During this second stage, any subsequent updates to the file may end
765 * up refiling the buffer on the original inode's dirty list again, so
766 * there is a chance we will end up with a buffer queued for write but
767 * not yet completed on that list. So, as a final cleanup we go through
768 * the osync code to catch these locked, dirty buffers without requeuing
769 * any newly dirty buffers for write.
771 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
773 struct buffer_head *bh;
774 struct list_head tmp;
775 struct address_space *mapping, *prev_mapping = NULL;
778 INIT_LIST_HEAD(&tmp);
781 while (!list_empty(list)) {
782 bh = BH_ENTRY(list->next);
783 mapping = bh->b_assoc_map;
784 __remove_assoc_queue(bh);
785 /* Avoid race with mark_buffer_dirty_inode() which does
786 * a lockless check and we rely on seeing the dirty bit */
788 if (buffer_dirty(bh) || buffer_locked(bh)) {
789 list_add(&bh->b_assoc_buffers, &tmp);
790 bh->b_assoc_map = mapping;
791 if (buffer_dirty(bh)) {
795 * Ensure any pending I/O completes so that
796 * ll_rw_block() actually writes the current
797 * contents - it is a noop if I/O is still in
798 * flight on potentially older contents.
800 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
803 * Kick off IO for the previous mapping. Note
804 * that we will not run the very last mapping,
805 * wait_on_buffer() will do that for us
806 * through sync_buffer().
808 if (prev_mapping && prev_mapping != mapping)
809 blk_run_address_space(prev_mapping);
810 prev_mapping = mapping;
818 while (!list_empty(&tmp)) {
819 bh = BH_ENTRY(tmp.prev);
821 mapping = bh->b_assoc_map;
822 __remove_assoc_queue(bh);
823 /* Avoid race with mark_buffer_dirty_inode() which does
824 * a lockless check and we rely on seeing the dirty bit */
826 if (buffer_dirty(bh)) {
827 list_add(&bh->b_assoc_buffers,
828 &mapping->private_list);
829 bh->b_assoc_map = mapping;
833 if (!buffer_uptodate(bh))
840 err2 = osync_buffers_list(lock, list);
848 * Invalidate any and all dirty buffers on a given inode. We are
849 * probably unmounting the fs, but that doesn't mean we have already
850 * done a sync(). Just drop the buffers from the inode list.
852 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
853 * assumes that all the buffers are against the blockdev. Not true
856 void invalidate_inode_buffers(struct inode *inode)
858 if (inode_has_buffers(inode)) {
859 struct address_space *mapping = &inode->i_data;
860 struct list_head *list = &mapping->private_list;
861 struct address_space *buffer_mapping = mapping->assoc_mapping;
863 spin_lock(&buffer_mapping->private_lock);
864 while (!list_empty(list))
865 __remove_assoc_queue(BH_ENTRY(list->next));
866 spin_unlock(&buffer_mapping->private_lock);
869 EXPORT_SYMBOL(invalidate_inode_buffers);
872 * Remove any clean buffers from the inode's buffer list. This is called
873 * when we're trying to free the inode itself. Those buffers can pin it.
875 * Returns true if all buffers were removed.
877 int remove_inode_buffers(struct inode *inode)
881 if (inode_has_buffers(inode)) {
882 struct address_space *mapping = &inode->i_data;
883 struct list_head *list = &mapping->private_list;
884 struct address_space *buffer_mapping = mapping->assoc_mapping;
886 spin_lock(&buffer_mapping->private_lock);
887 while (!list_empty(list)) {
888 struct buffer_head *bh = BH_ENTRY(list->next);
889 if (buffer_dirty(bh)) {
893 __remove_assoc_queue(bh);
895 spin_unlock(&buffer_mapping->private_lock);
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer. Use the bh->b_this_page linked list to
903 * follow the buffers created. Return NULL if unable to create more
906 * The retry flag is used to differentiate async IO (paging, swapping)
907 * which may not fail from ordinary buffer allocations.
909 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
912 struct buffer_head *bh, *head;
918 while ((offset -= size) >= 0) {
919 bh = alloc_buffer_head(GFP_NOFS);
924 bh->b_this_page = head;
929 atomic_set(&bh->b_count, 0);
930 bh->b_private = NULL;
933 /* Link the buffer to its page */
934 set_bh_page(bh, page, offset);
936 init_buffer(bh, NULL, NULL);
940 * In case anything failed, we just free everything we got.
946 head = head->b_this_page;
947 free_buffer_head(bh);
952 * Return failure for non-async IO requests. Async IO requests
953 * are not allowed to fail, so we have to wait until buffer heads
954 * become available. But we don't want tasks sleeping with
955 * partially complete buffers, so all were released above.
960 /* We're _really_ low on memory. Now we just
961 * wait for old buffer heads to become free due to
962 * finishing IO. Since this is an async request and
963 * the reserve list is empty, we're sure there are
964 * async buffer heads in use.
969 EXPORT_SYMBOL_GPL(alloc_page_buffers);
972 link_dev_buffers(struct page *page, struct buffer_head *head)
974 struct buffer_head *bh, *tail;
979 bh = bh->b_this_page;
981 tail->b_this_page = head;
982 attach_page_buffers(page, head);
986 * Initialise the state of a blockdev page's buffers.
989 init_page_buffers(struct page *page, struct block_device *bdev,
990 sector_t block, int size)
992 struct buffer_head *head = page_buffers(page);
993 struct buffer_head *bh = head;
994 int uptodate = PageUptodate(page);
997 if (!buffer_mapped(bh)) {
998 init_buffer(bh, NULL, NULL);
1000 bh->b_blocknr = block;
1002 set_buffer_uptodate(bh);
1003 set_buffer_mapped(bh);
1006 bh = bh->b_this_page;
1007 } while (bh != head);
1011 * Create the page-cache page that contains the requested block.
 * This is used purely for blockdev mappings.
1015 static struct page *
1016 grow_dev_page(struct block_device *bdev, sector_t block,
1017 pgoff_t index, int size)
1019 struct inode *inode = bdev->bd_inode;
1021 struct buffer_head *bh;
1023 page = find_or_create_page(inode->i_mapping, index,
1024 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1028 BUG_ON(!PageLocked(page));
1030 if (page_has_buffers(page)) {
1031 bh = page_buffers(page);
1032 if (bh->b_size == size) {
1033 init_page_buffers(page, bdev, block, size);
1036 if (!try_to_free_buffers(page))
1041 * Allocate some buffers for this page
1043 bh = alloc_page_buffers(page, size, 0);
1048 * Link the page to the buffers and initialise them. Take the
1049 * lock to be atomic wrt __find_get_block(), which does not
1050 * run under the page lock.
1052 spin_lock(&inode->i_mapping->private_lock);
1053 link_dev_buffers(page, bh);
1054 init_page_buffers(page, bdev, block, size);
1055 spin_unlock(&inode->i_mapping->private_lock);
1061 page_cache_release(page);
1066 * Create buffers for the specified block device block's page. If
1067 * that page was dirty, the buffers are set dirty also.
1070 grow_buffers(struct block_device *bdev, sector_t block, int size)
1079 } while ((size << sizebits) < PAGE_SIZE);
1081 index = block >> sizebits;
1084 * Check for a block which wants to lie outside our maximum possible
1085 * pagecache index. (this comparison is done using sector_t types).
1087 if (unlikely(index != block >> sizebits)) {
1088 char b[BDEVNAME_SIZE];
1090 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1092 __func__, (unsigned long long)block,
1096 block = index << sizebits;
	/* Create a page with the proper size buffers. */
1098 page = grow_dev_page(bdev, block, index, size);
1102 page_cache_release(page);
1106 static struct buffer_head *
1107 __getblk_slow(struct block_device *bdev, sector_t block, int size)
	/* Size must be a multiple of the hard sector size */
1110 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1111 (size < 512 || size > PAGE_SIZE))) {
1112 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1114 printk(KERN_ERR "logical block size: %d\n",
1115 bdev_logical_block_size(bdev));
1122 struct buffer_head * bh;
1125 bh = __find_get_block(bdev, block, size);
1129 ret = grow_buffers(bdev, block, size);
1138 * The relationship between dirty buffers and dirty pages:
1140 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1141 * the page is tagged dirty in its radix tree.
1143 * At all times, the dirtiness of the buffers represents the dirtiness of
1144 * subsections of the page. If the page has buffers, the page dirty bit is
1145 * merely a hint about the true dirty state.
1147 * When a page is set dirty in its entirety, all its buffers are marked dirty
1148 * (if the page has buffers).
1150 * When a buffer is marked dirty, its page is dirtied, but the page's other
1153 * Also. When blockdev buffers are explicitly read with bread(), they
1154 * individually become uptodate. But their backing page remains not
1155 * uptodate - even if all of its buffers are uptodate. A subsequent
1156 * block_read_full_page() against that page will discover all the uptodate
1157 * buffers, will set the page uptodate and will perform no I/O.
1161 * mark_buffer_dirty - mark a buffer_head as needing writeout
1162 * @bh: the buffer_head to mark dirty
1164 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1165 * backing page dirty, then tag the page as dirty in its address_space's radix
1166 * tree and then attach the address_space's inode to its superblock's dirty
1169 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1170 * mapping->tree_lock and the global inode_lock.
1172 void mark_buffer_dirty(struct buffer_head *bh)
1174 WARN_ON_ONCE(!buffer_uptodate(bh));
1177 * Very *carefully* optimize the it-is-already-dirty case.
1179 * Don't let the final "is it dirty" escape to before we
1180 * perhaps modified the buffer.
1182 if (buffer_dirty(bh)) {
1184 if (buffer_dirty(bh))
1188 if (!test_set_buffer_dirty(bh)) {
1189 struct page *page = bh->b_page;
1190 if (!TestSetPageDirty(page)) {
1191 struct address_space *mapping = page_mapping(page);
1193 __set_page_dirty(page, mapping, 0);
1197 EXPORT_SYMBOL(mark_buffer_dirty);
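
/*
 * Illustrative sketch, not part of this file: the common sequence for
 * modifying an on-disk metadata block through the buffer cache, assuming the
 * usual sb_bread()/brelse() synchronous wrappers.  The block number and the
 * modification are hypothetical.
 */
static int __maybe_unused example_zero_block(struct super_block *sb,
					     sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* hypothetical modification */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);	/* page + radix tree + inode dirtying as above */
	brelse(bh);
	return 0;
}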
1200 * Decrement a buffer_head's reference count. If all buffers against a page
1201 * have zero reference count, are clean and unlocked, and if the page is clean
1202 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1203 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1204 * a page but it ends up not being freed, and buffers may later be reattached).
1206 void __brelse(struct buffer_head * buf)
1208 if (atomic_read(&buf->b_count)) {
1212 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1214 EXPORT_SYMBOL(__brelse);
1217 * bforget() is like brelse(), except it discards any
1218 * potentially dirty data.
1220 void __bforget(struct buffer_head *bh)
1222 clear_buffer_dirty(bh);
1223 if (bh->b_assoc_map) {
1224 struct address_space *buffer_mapping = bh->b_page->mapping;
1226 spin_lock(&buffer_mapping->private_lock);
1227 list_del_init(&bh->b_assoc_buffers);
1228 bh->b_assoc_map = NULL;
1229 spin_unlock(&buffer_mapping->private_lock);
1233 EXPORT_SYMBOL(__bforget);
1235 static struct buffer_head *__bread_slow(struct buffer_head *bh,
1236 struct wait_bit_queue *wait)
1238 if (lock_buffer_async(bh, wait))
1239 return ERR_PTR(-EIOCBRETRY);
1240 if (buffer_uptodate(bh)) {
1245 bh->b_end_io = end_buffer_read_sync;
1246 submit_bh(READ, bh);
1247 if (wait_on_buffer_async(bh, wait))
1248 return ERR_PTR(-EIOCBRETRY);
1249 if (buffer_uptodate(bh))
1257 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1258 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1259 * refcount elevated by one when they're in an LRU. A buffer can only appear
1260 * once in a particular CPU's LRU. A single buffer can be present in multiple
1261 * CPU's LRUs at the same time.
1263 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1264 * sb_find_get_block().
1266 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1267 * a local interrupt disable for that.
1270 #define BH_LRU_SIZE 8
1273 struct buffer_head *bhs[BH_LRU_SIZE];
1276 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1279 #define bh_lru_lock() local_irq_disable()
1280 #define bh_lru_unlock() local_irq_enable()
1282 #define bh_lru_lock() preempt_disable()
1283 #define bh_lru_unlock() preempt_enable()
1286 static inline void check_irqs_on(void)
1288 #ifdef irqs_disabled
1289 BUG_ON(irqs_disabled());
1294 * The LRU management algorithm is dopey-but-simple. Sorry.
1296 static void bh_lru_install(struct buffer_head *bh)
1298 struct buffer_head *evictee = NULL;
1303 lru = &__get_cpu_var(bh_lrus);
1304 if (lru->bhs[0] != bh) {
1305 struct buffer_head *bhs[BH_LRU_SIZE];
1311 for (in = 0; in < BH_LRU_SIZE; in++) {
1312 struct buffer_head *bh2 = lru->bhs[in];
1317 if (out >= BH_LRU_SIZE) {
1318 BUG_ON(evictee != NULL);
1325 while (out < BH_LRU_SIZE)
1327 memcpy(lru->bhs, bhs, sizeof(bhs));
1336 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1338 static struct buffer_head *
1339 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1341 struct buffer_head *ret = NULL;
1347 lru = &__get_cpu_var(bh_lrus);
1348 for (i = 0; i < BH_LRU_SIZE; i++) {
1349 struct buffer_head *bh = lru->bhs[i];
1351 if (bh && bh->b_bdev == bdev &&
1352 bh->b_blocknr == block && bh->b_size == size) {
1355 lru->bhs[i] = lru->bhs[i - 1];
1370 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1371 * it in the LRU and mark it as accessed. If it is not present then return
1374 struct buffer_head *
1375 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1377 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1380 bh = __find_get_block_slow(bdev, block);
1388 EXPORT_SYMBOL(__find_get_block);
1391 * __getblk will locate (and, if necessary, create) the buffer_head
1392 * which corresponds to the passed block_device, block and size. The
1393 * returned buffer has its reference count incremented.
1395 * __getblk() cannot fail - it just keeps trying. If you pass it an
1396 * illegal block number, __getblk() will happily return a buffer_head
1397 * which represents the non-existent block. Very weird.
1399 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1400 * attempt is failing. FIXME, perhaps?
1402 struct buffer_head *
1403 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1405 struct buffer_head *bh = __find_get_block(bdev, block, size);
1409 bh = __getblk_slow(bdev, block, size);
1412 EXPORT_SYMBOL(__getblk);
1415 * Do async read-ahead on a buffer..
1417 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1419 struct buffer_head *bh = __getblk(bdev, block, size);
1421 ll_rw_block(READA, 1, &bh);
1425 EXPORT_SYMBOL(__breadahead);
 * __bread_async() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 * @wait: wait queue entry to use if the read must be retried asynchronously
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * It returns NULL if the block was unreadable; with a non-NULL @wait it may
 * instead return ERR_PTR(-EIOCBRETRY) if the read must be retried.
1436 struct buffer_head *
1437 __bread_async(struct block_device *bdev, sector_t block, unsigned size,
1438 struct wait_bit_queue *wait)
1440 struct buffer_head *bh = __getblk(bdev, block, size);
1442 if (likely(bh) && !buffer_uptodate(bh))
1443 bh = __bread_slow(bh, wait);
1446 EXPORT_SYMBOL(__bread_async);
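
/*
 * Illustrative sketch, not part of this file: a plain synchronous read of one
 * block with the traditional __bread()/brelse() pairing, assuming the usual
 * __bread() wrapper (here presumably built on __bread_async() with no wait
 * queue).  The block number and size are hypothetical.
 */
static int __maybe_unused example_read_one_block(struct block_device *bdev)
{
	struct buffer_head *bh = __bread(bdev, 1, 4096);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... interpret bh->b_data ... */
	brelse(bh);
	return 0;
}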
1449 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1450 * This doesn't race because it runs in each cpu either in irq
1451 * or with preempt disabled.
1453 static void invalidate_bh_lru(void *arg)
1455 struct bh_lru *b = &get_cpu_var(bh_lrus);
1458 for (i = 0; i < BH_LRU_SIZE; i++) {
1462 put_cpu_var(bh_lrus);
1465 void invalidate_bh_lrus(void)
1467 on_each_cpu(invalidate_bh_lru, NULL, 1);
1469 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1471 void set_bh_page(struct buffer_head *bh,
1472 struct page *page, unsigned long offset)
1475 BUG_ON(offset >= PAGE_SIZE);
1476 if (PageHighMem(page))
1478 * This catches illegal uses and preserves the offset:
1480 bh->b_data = (char *)(0 + offset);
1482 bh->b_data = page_address(page) + offset;
1484 EXPORT_SYMBOL(set_bh_page);
1487 * Called when truncating a buffer on a page completely.
1489 static void discard_buffer(struct buffer_head * bh)
1492 clear_buffer_dirty(bh);
1494 clear_buffer_mapped(bh);
1495 clear_buffer_req(bh);
1496 clear_buffer_new(bh);
1497 clear_buffer_delay(bh);
1498 clear_buffer_unwritten(bh);
 * block_invalidatepage - invalidate part or all of a buffer-backed page
1505 * @page: the page which is affected
1506 * @offset: the index of the truncation point
1508 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
1511 * block_invalidatepage() does not have to release all buffers, but it must
1512 * ensure that no dirty buffer is left outside @offset and that no I/O
1513 * is underway against any of the blocks which are outside the truncation
1514 * point. Because the caller is about to free (and possibly reuse) those
1517 void block_invalidatepage(struct page *page, unsigned long offset)
1519 struct buffer_head *head, *bh, *next;
1520 unsigned int curr_off = 0;
1522 BUG_ON(!PageLocked(page));
1523 if (!page_has_buffers(page))
1526 head = page_buffers(page);
1529 unsigned int next_off = curr_off + bh->b_size;
1530 next = bh->b_this_page;
1533 * is this block fully invalidated?
1535 if (offset <= curr_off)
1537 curr_off = next_off;
1539 } while (bh != head);
1542 * We release buffers only if the entire page is being invalidated.
1543 * The get_block cached value has been unconditionally invalidated,
1544 * so real IO is not possible anymore.
1547 try_to_release_page(page, 0);
1551 EXPORT_SYMBOL(block_invalidatepage);
1554 * We attach and possibly dirty the buffers atomically wrt
1555 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1556 * is already excluded via the page lock.
1558 void create_empty_buffers(struct page *page,
1559 unsigned long blocksize, unsigned long b_state)
1561 struct buffer_head *bh, *head, *tail;
1563 head = alloc_page_buffers(page, blocksize, 1);
1566 bh->b_state |= b_state;
1568 bh = bh->b_this_page;
1570 tail->b_this_page = head;
1572 spin_lock(&page->mapping->private_lock);
1573 if (PageUptodate(page) || PageDirty(page)) {
1576 if (PageDirty(page))
1577 set_buffer_dirty(bh);
1578 if (PageUptodate(page))
1579 set_buffer_uptodate(bh);
1580 bh = bh->b_this_page;
1581 } while (bh != head);
1583 attach_page_buffers(page, head);
1584 spin_unlock(&page->mapping->private_lock);
1586 EXPORT_SYMBOL(create_empty_buffers);
1589 * We are taking a block for data and we don't want any output from any
1590 * buffer-cache aliases starting from return from that function and
1591 * until the moment when something will explicitly mark the buffer
1592 * dirty (hopefully that will not happen until we will free that block ;-)
1593 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
1595 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1596 * don't want to mark the alias unmapped, for example - it would confuse
1597 * anyone who might pick it with bread() afterwards...
1599 * Also.. Note that bforget() doesn't lock the buffer. So there can
1600 * be writeout I/O going on against recently-freed buffers. We don't
1601 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1602 * only if we really need to. That happens here.
1604 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1606 struct buffer_head *old_bh;
1610 old_bh = __find_get_block_slow(bdev, block);
1612 clear_buffer_dirty(old_bh);
1613 wait_on_buffer(old_bh);
1614 clear_buffer_req(old_bh);
1618 EXPORT_SYMBOL(unmap_underlying_metadata);
1621 * NOTE! All mapped/uptodate combinations are valid:
1623 * Mapped Uptodate Meaning
1625 * No No "unknown" - must do get_block()
1626 * No Yes "hole" - zero-filled
1627 * Yes No "allocated" - allocated on disk, not read in
1628 * Yes Yes "valid" - allocated and up-to-date in memory.
1630 * "Dirty" is valid only with the last case (mapped+uptodate).
1634 * While block_write_full_page is writing back the dirty buffers under
1635 * the page lock, whoever dirtied the buffers may decide to clean them
1636 * again at any time. We handle that by only looking at the buffer
1637 * state inside lock_buffer().
1639 * If block_write_full_page() is called for regular writeback
1640 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1641 * locked buffer. This only can happen if someone has written the buffer
1642 * directly, with submit_bh(). At the address_space level PageWriteback
1643 * prevents this contention from occurring.
1645 * If block_write_full_page() is called with wbc->sync_mode ==
1646 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1647 * causes the writes to be flagged as synchronous writes, but the
1648 * block device queue will NOT be unplugged, since usually many pages
 * will be pushed out before the higher-level caller actually
1650 * waits for the writes to be completed. The various wait functions,
1651 * such as wait_on_writeback_range() will ultimately call sync_page()
1652 * which will ultimately call blk_run_backing_dev(), which will end up
1653 * unplugging the device queue.
1655 static int __block_write_full_page(struct inode *inode, struct page *page,
1656 get_block_t *get_block, struct writeback_control *wbc,
1657 bh_end_io_t *handler)
1661 sector_t last_block;
1662 struct buffer_head *bh, *head;
1663 const unsigned blocksize = 1 << inode->i_blkbits;
1664 int nr_underway = 0;
1665 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1666 WRITE_SYNC_PLUG : WRITE);
1668 BUG_ON(!PageLocked(page));
1670 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1672 if (!page_has_buffers(page)) {
1673 create_empty_buffers(page, blocksize,
1674 (1 << BH_Dirty)|(1 << BH_Uptodate));
1678 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1679 * here, and the (potentially unmapped) buffers may become dirty at
1680 * any time. If a buffer becomes dirty here after we've inspected it
1681 * then we just miss that fact, and the page stays dirty.
1683 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1684 * handle that here by just cleaning them.
1687 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1688 head = page_buffers(page);
1692 * Get all the dirty buffers mapped to disk addresses and
1693 * handle any aliases from the underlying blockdev's mapping.
1696 if (block > last_block) {
1698 * mapped buffers outside i_size will occur, because
1699 * this page can be outside i_size when there is a
1700 * truncate in progress.
1703 * The buffer was zeroed by block_write_full_page()
1705 clear_buffer_dirty(bh);
1706 set_buffer_uptodate(bh);
1707 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1709 WARN_ON(bh->b_size != blocksize);
1710 err = get_block(inode, block, bh, 1);
1713 clear_buffer_delay(bh);
1714 if (buffer_new(bh)) {
1715 /* blockdev mappings never come here */
1716 clear_buffer_new(bh);
1717 unmap_underlying_metadata(bh->b_bdev,
1721 bh = bh->b_this_page;
1723 } while (bh != head);
1726 if (!buffer_mapped(bh))
1729 * If it's a fully non-blocking write attempt and we cannot
1730 * lock the buffer then redirty the page. Note that this can
1731 * potentially cause a busy-wait loop from writeback threads
1732 * and kswapd activity, but those code paths have their own
1733 * higher-level throttling.
1735 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1737 } else if (!trylock_buffer(bh)) {
1738 redirty_page_for_writepage(wbc, page);
1741 if (test_clear_buffer_dirty(bh)) {
1742 mark_buffer_async_write_endio(bh, handler);
1746 } while ((bh = bh->b_this_page) != head);
1749 * The page and its buffers are protected by PageWriteback(), so we can
1750 * drop the bh refcounts early.
1752 BUG_ON(PageWriteback(page));
1753 set_page_writeback(page);
1756 struct buffer_head *next = bh->b_this_page;
1757 if (buffer_async_write(bh)) {
1758 submit_bh(write_op, bh);
1762 } while (bh != head);
1767 if (nr_underway == 0) {
1769 * The page was marked dirty, but the buffers were
1770 * clean. Someone wrote them back by hand with
1771 * ll_rw_block/submit_bh. A rare case.
1773 end_page_writeback(page);
1776 * The page and buffer_heads can be released at any time from
1784 * ENOSPC, or some other error. We may already have added some
1785 * blocks to the file, so we need to write these out to avoid
1786 * exposing stale data.
1787 * The page is currently locked and not marked for writeback
1790 /* Recovery: lock and submit the mapped buffers */
1792 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1793 !buffer_delay(bh)) {
1795 mark_buffer_async_write_endio(bh, handler);
1798 * The buffer may have been set dirty during
1799 * attachment to a dirty page.
1801 clear_buffer_dirty(bh);
1803 } while ((bh = bh->b_this_page) != head);
1805 BUG_ON(PageWriteback(page));
1806 mapping_set_error(page->mapping, err);
1807 set_page_writeback(page);
1809 struct buffer_head *next = bh->b_this_page;
1810 if (buffer_async_write(bh)) {
1811 clear_buffer_dirty(bh);
1812 submit_bh(write_op, bh);
1816 } while (bh != head);
1822 * If a page has any new buffers, zero them out here, and mark them uptodate
1823 * and dirty so they'll be written out (in order to prevent uninitialised
1824 * block data from leaking). And clear the new bit.
1826 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1828 unsigned int block_start, block_end;
1829 struct buffer_head *head, *bh;
1831 BUG_ON(!PageLocked(page));
1832 if (!page_has_buffers(page))
1835 bh = head = page_buffers(page);
1838 block_end = block_start + bh->b_size;
1840 if (buffer_new(bh)) {
1841 if (block_end > from && block_start < to) {
1842 if (!PageUptodate(page)) {
1843 unsigned start, size;
1845 start = max(from, block_start);
1846 size = min(to, block_end) - start;
1848 zero_user(page, start, size);
1849 set_buffer_uptodate(bh);
1852 clear_buffer_new(bh);
1853 mark_buffer_dirty(bh);
1857 block_start = block_end;
1858 bh = bh->b_this_page;
1859 } while (bh != head);
1861 EXPORT_SYMBOL(page_zero_new_buffers);
1863 static int __block_prepare_write(struct inode *inode, struct page *page,
1864 unsigned from, unsigned to, get_block_t *get_block)
1866 unsigned block_start, block_end;
1869 unsigned blocksize, bbits;
1870 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1872 BUG_ON(!PageLocked(page));
1873 BUG_ON(from > PAGE_CACHE_SIZE);
1874 BUG_ON(to > PAGE_CACHE_SIZE);
1877 blocksize = 1 << inode->i_blkbits;
1878 if (!page_has_buffers(page))
1879 create_empty_buffers(page, blocksize, 0);
1880 head = page_buffers(page);
1882 bbits = inode->i_blkbits;
1883 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1885 for(bh = head, block_start = 0; bh != head || !block_start;
1886 block++, block_start=block_end, bh = bh->b_this_page) {
1887 block_end = block_start + blocksize;
1888 if (block_end <= from || block_start >= to) {
1889 if (PageUptodate(page)) {
1890 if (!buffer_uptodate(bh))
1891 set_buffer_uptodate(bh);
1896 clear_buffer_new(bh);
1897 if (!buffer_mapped(bh)) {
1898 WARN_ON(bh->b_size != blocksize);
1899 err = get_block(inode, block, bh, 1);
1902 if (buffer_new(bh)) {
1903 unmap_underlying_metadata(bh->b_bdev,
1905 if (PageUptodate(page)) {
1906 clear_buffer_new(bh);
1907 set_buffer_uptodate(bh);
1908 mark_buffer_dirty(bh);
1911 if (block_end > to || block_start < from)
1912 zero_user_segments(page,
1918 if (PageUptodate(page)) {
1919 if (!buffer_uptodate(bh))
1920 set_buffer_uptodate(bh);
1923 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1924 !buffer_unwritten(bh) &&
1925 (block_start < from || block_end > to)) {
1926 ll_rw_block(READ, 1, &bh);
1931 * If we issued read requests - let them complete.
1933 while(wait_bh > wait) {
1936 ret = wait_on_buffer_async(*--wait_bh, current->io_wait);
			WARN(1, "%s: ret\n", __func__);
1941 if (!buffer_uptodate(*wait_bh))
1945 page_zero_new_buffers(page, from, to);
1949 static int __block_commit_write(struct inode *inode, struct page *page,
1950 unsigned from, unsigned to)
1952 unsigned block_start, block_end;
1955 struct buffer_head *bh, *head;
1957 blocksize = 1 << inode->i_blkbits;
1959 for(bh = head = page_buffers(page), block_start = 0;
1960 bh != head || !block_start;
1961 block_start=block_end, bh = bh->b_this_page) {
1962 block_end = block_start + blocksize;
1963 if (block_end <= from || block_start >= to) {
1964 if (!buffer_uptodate(bh))
1967 set_buffer_uptodate(bh);
1968 mark_buffer_dirty(bh);
1970 clear_buffer_new(bh);
1974 * If this is a partial write which happened to make all buffers
1975 * uptodate then we can optimize away a bogus readpage() for
1976 * the next read(). Here we 'discover' whether the page went
1977 * uptodate as a result of this (potentially partial) write.
1980 SetPageUptodate(page);
1985 * block_write_begin takes care of the basic task of block allocation and
1986 * bringing partial write blocks uptodate first.
1988 * If *pagep is not NULL, then block_write_begin uses the locked page
1989 * at *pagep rather than allocating its own. In this case, the page will
1990 * not be unlocked or deallocated on failure.
1992 int block_write_begin(struct file *file, struct address_space *mapping,
1993 loff_t pos, unsigned len, unsigned flags,
1994 struct page **pagep, void **fsdata,
1995 get_block_t *get_block)
1997 struct inode *inode = mapping->host;
2001 unsigned start, end;
2004 index = pos >> PAGE_CACHE_SHIFT;
2005 start = pos & (PAGE_CACHE_SIZE - 1);
2011 page = grab_cache_page_write_begin(mapping, index, flags);
2018 BUG_ON(!PageLocked(page));
2020 status = __block_prepare_write(inode, page, start, end, get_block);
2021 if (unlikely(status)) {
2022 ClearPageUptodate(page);
2026 page_cache_release(page);
2030 * prepare_write() may have instantiated a few blocks
2031 * outside i_size. Trim these off again. Don't need
2032 * i_size_read because we hold i_mutex.
2034 if (pos + len > inode->i_size)
2035 vmtruncate(inode, inode->i_size);
2042 EXPORT_SYMBOL(block_write_begin);
2044 int block_write_end(struct file *file, struct address_space *mapping,
2045 loff_t pos, unsigned len, unsigned copied,
2046 struct page *page, void *fsdata)
2048 struct inode *inode = mapping->host;
2051 start = pos & (PAGE_CACHE_SIZE - 1);
2053 if (unlikely(copied < len)) {
2055 * The buffers that were written will now be uptodate, so we
2056 * don't have to worry about a readpage reading them and
2057 * overwriting a partial write. However if we have encountered
2058 * a short write and only partially written into a buffer, it
2059 * will not be marked uptodate, so a readpage might come in and
2060 * destroy our partial write.
2062 * Do the simplest thing, and just treat any short write to a
2063 * non uptodate page as a zero-length write, and force the
2064 * caller to redo the whole thing.
2066 if (!PageUptodate(page))
2069 page_zero_new_buffers(page, start+copied, start+len);
2071 flush_dcache_page(page);
2073 /* This could be a short (even 0-length) commit */
2074 __block_commit_write(inode, page, start, start+copied);
2078 EXPORT_SYMBOL(block_write_end);
2080 int generic_write_end(struct file *file, struct address_space *mapping,
2081 loff_t pos, unsigned len, unsigned copied,
2082 struct page *page, void *fsdata)
2084 struct inode *inode = mapping->host;
2085 int i_size_changed = 0;
2087 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2090 * No need to use i_size_read() here, the i_size
2091 * cannot change under us because we hold i_mutex.
2093 * But it's important to update i_size while still holding page lock:
2094 * page writeout could otherwise come in and zero beyond i_size.
2096 if (pos+copied > inode->i_size) {
2097 i_size_write(inode, pos+copied);
2102 page_cache_release(page);
2105 * Don't mark the inode dirty under page lock. First, it unnecessarily
2106 * makes the holding time of page lock longer. Second, it forces lock
2107 * ordering of page lock and transaction start for journaling
2111 mark_inode_dirty(inode);
2115 EXPORT_SYMBOL(generic_write_end);
2118 * block_is_partially_uptodate checks whether buffers within a page are
2121 * Returns true if all buffers which correspond to a file portion
2122 * we want to read are uptodate.
2124 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2127 struct inode *inode = page->mapping->host;
2128 unsigned block_start, block_end, blocksize;
2130 struct buffer_head *bh, *head;
2133 if (!page_has_buffers(page))
2136 blocksize = 1 << inode->i_blkbits;
2137 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2139 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2142 head = page_buffers(page);
2146 block_end = block_start + blocksize;
2147 if (block_end > from && block_start < to) {
2148 if (!buffer_uptodate(bh)) {
2152 if (block_end >= to)
2155 block_start = block_end;
2156 bh = bh->b_this_page;
2157 } while (bh != head);
2161 EXPORT_SYMBOL(block_is_partially_uptodate);
2164 * Generic "read page" function for block devices that have the normal
2165 * get_block functionality. This is most of the block device filesystems.
2166 * Reads the page asynchronously --- the unlock_buffer() and
2167 * set/clear_buffer_uptodate() functions propagate buffer state into the
2168 * page struct once IO has completed.
2170 int block_read_full_page(struct page *page, get_block_t *get_block)
2172 struct inode *inode = page->mapping->host;
2173 sector_t iblock, lblock;
2174 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2175 unsigned int blocksize;
2177 int fully_mapped = 1;
2179 BUG_ON(!PageLocked(page));
2180 blocksize = 1 << inode->i_blkbits;
2181 if (!page_has_buffers(page))
2182 create_empty_buffers(page, blocksize, 0);
2183 head = page_buffers(page);
2185 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2186 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2192 if (buffer_uptodate(bh))
2195 if (!buffer_mapped(bh)) {
2199 if (iblock < lblock) {
2200 WARN_ON(bh->b_size != blocksize);
2201 err = get_block(inode, iblock, bh, 0);
2205 if (!buffer_mapped(bh)) {
2206 zero_user(page, i * blocksize, blocksize);
2208 set_buffer_uptodate(bh);
2212 * get_block() might have updated the buffer
2215 if (buffer_uptodate(bh))
2219 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2222 SetPageMappedToDisk(page);
2226 * All buffers are uptodate - we can set the page uptodate
2227 * as well. But not if get_block() returned an error.
2229 if (!PageError(page))
2230 SetPageUptodate(page);
2235 /* Stage two: lock the buffers */
2236 for (i = 0; i < nr; i++) {
2239 mark_buffer_async_read(bh);
2243 * Stage 3: start the IO. Check for uptodateness
2244 * inside the buffer lock in case another process reading
2245 * the underlying blockdev brought it uptodate (the sct fix).
2247 for (i = 0; i < nr; i++) {
2249 if (buffer_uptodate(bh))
2250 end_buffer_async_read(bh, 1);
2252 submit_bh(READ, bh);
2256 EXPORT_SYMBOL(block_read_full_page);
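
/*
 * Illustrative sketch, not part of this file: wiring the helpers above into a
 * filesystem's address_space_operations.  "examplefs" and
 * examplefs_get_block() are hypothetical; get_block maps a block in the file
 * to a block on disk and is all the filesystem has to supply.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	/* look the block up (allocating it if create != 0), then: */
	map_bh(bh_result, inode->i_sb, 0 /* hypothetical disk block */);
	return 0;
}

static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}

static int examplefs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops __maybe_unused = {
	.readpage	= examplefs_readpage,
	.write_begin	= examplefs_write_begin,
	.write_end	= generic_write_end,
};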
2258 /* utility function for filesystems that need to do work on expanding
2259 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2260 * deal with the hole.
2262 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2264 struct address_space *mapping = inode->i_mapping;
2269 err = inode_newsize_ok(inode, size);
2273 err = pagecache_write_begin(NULL, mapping, size, 0,
2274 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2279 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2285 EXPORT_SYMBOL(generic_cont_expand_simple);
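/*
 * Illustrative sketch (not part of this file): an expanding truncate in a
 * hypothetical ->setattr path might use this helper to instantiate the new
 * file tail through ordinary pagecache writes. attr is the struct iattr
 * passed to setattr:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */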
2287 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2288 loff_t pos, loff_t *bytes)
2290 struct inode *inode = mapping->host;
2291 unsigned blocksize = 1 << inode->i_blkbits;
2294 pgoff_t index, curidx;
2296 unsigned zerofrom, offset, len;
2299 index = pos >> PAGE_CACHE_SHIFT;
2300 offset = pos & ~PAGE_CACHE_MASK;
2302 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2303 zerofrom = curpos & ~PAGE_CACHE_MASK;
2304 if (zerofrom & (blocksize-1)) {
2305 *bytes |= (blocksize-1);
2308 len = PAGE_CACHE_SIZE - zerofrom;
2310 err = pagecache_write_begin(file, mapping, curpos, len,
2311 AOP_FLAG_UNINTERRUPTIBLE,
2315 zero_user(page, zerofrom, len);
2316 err = pagecache_write_end(file, mapping, curpos, len, len,
2323 balance_dirty_pages_ratelimited(mapping);
2326 /* page covers the boundary, find the boundary offset */
2327 if (index == curidx) {
2328 zerofrom = curpos & ~PAGE_CACHE_MASK;
2329 /* if we are expanding the file, the last block will be filled */
2330 if (offset <= zerofrom) {
2333 if (zerofrom & (blocksize-1)) {
2334 *bytes |= (blocksize-1);
2337 len = offset - zerofrom;
2339 err = pagecache_write_begin(file, mapping, curpos, len,
2340 AOP_FLAG_UNINTERRUPTIBLE,
2344 zero_user(page, zerofrom, len);
2345 err = pagecache_write_end(file, mapping, curpos, len, len,
2357 * For moronic filesystems that do not allow holes in files.
2358 * We may have to extend the file.
2360 int cont_write_begin(struct file *file, struct address_space *mapping,
2361 loff_t pos, unsigned len, unsigned flags,
2362 struct page **pagep, void **fsdata,
2363 get_block_t *get_block, loff_t *bytes)
2365 struct inode *inode = mapping->host;
2366 unsigned blocksize = 1 << inode->i_blkbits;
2370 err = cont_expand_zero(file, mapping, pos, bytes);
2374 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2375 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2376 *bytes |= (blocksize-1);
2381 err = block_write_begin(file, mapping, pos, len,
2382 flags, pagep, fsdata, get_block);
2386 EXPORT_SYMBOL(cont_write_begin);
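/*
 * Illustrative sketch (not part of this file): a filesystem that cannot
 * represent holes would wrap this in its ->write_begin, passing a pointer
 * to its "initialised up to here" watermark so the gap gets zero-filled.
 * foo_get_block and foo_i(inode)->mmu_private are assumed names:
 *
 *	static int foo_write_begin(struct file *file, struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, foo_get_block,
 *					&foo_i(mapping->host)->mmu_private);
 *	}
 */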
2388 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2389 get_block_t *get_block)
2391 struct inode *inode = page->mapping->host;
2392 int err = __block_prepare_write(inode, page, from, to, get_block);
2394 ClearPageUptodate(page);
2397 EXPORT_SYMBOL(block_prepare_write);
2399 int block_commit_write(struct page *page, unsigned from, unsigned to)
2401 struct inode *inode = page->mapping->host;
2402 __block_commit_write(inode, page, from, to);
2405 EXPORT_SYMBOL(block_commit_write);
2408 * block_page_mkwrite() is not allowed to change the file size as it gets
2409 * called from a page fault handler when a page is first dirtied. Hence we must
2410 * be careful to check for EOF conditions here. We set the page up correctly
2411 * for a written page which means we get ENOSPC checking when writing into
2412 * holes and correct delalloc and unwritten extent mapping on filesystems that
2413 * support these features.
2415 * We are not allowed to take the i_mutex here so we have to play games to
2416 * protect against truncate races as the page could now be beyond EOF. Because
2417 * vmtruncate() writes the inode size before removing pages, once we have the
2418 * page lock we can determine safely if the page is beyond EOF. If it is not
2419 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2423 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2424 get_block_t get_block)
2426 struct page *page = vmf->page;
2427 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2430 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2433 size = i_size_read(inode);
2434 if ((page->mapping != inode->i_mapping) ||
2435 (page_offset(page) > size)) {
2436 /* page got truncated out from underneath us */
2441 /* page is wholly or partially inside EOF */
2442 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2443 end = size & ~PAGE_CACHE_MASK;
2445 end = PAGE_CACHE_SIZE;
2447 ret = block_prepare_write(page, 0, end, get_block);
2449 ret = block_commit_write(page, 0, end);
2451 if (unlikely(ret)) {
2455 else /* -ENOSPC, -EIO, etc */
2456 ret = VM_FAULT_SIGBUS;
2458 ret = VM_FAULT_LOCKED;
2463 EXPORT_SYMBOL(block_page_mkwrite);
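/*
 * Illustrative sketch (not part of this file): a filesystem exposes this
 * through its vm_operations_struct so that writes through mmap allocate
 * blocks and get proper ENOSPC handling. foo_get_block, foo_page_mkwrite
 * and foo_file_vm_ops are assumed names:
 *
 *	static int foo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, foo_get_block);
 *	}
 *
 *	static const struct vm_operations_struct foo_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= foo_page_mkwrite,
 *	};
 */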
2466 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2467 * immediately, while under the page lock. So it needs a special end_io
2468 * handler which does not touch the bh after unlocking it.
2470 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2472 __end_buffer_read_notouch(bh, uptodate);
2476 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2477 * the page (converting it to circular linked list and taking care of page dirty races).
2480 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2482 struct buffer_head *bh;
2484 BUG_ON(!PageLocked(page));
2486 spin_lock(&page->mapping->private_lock);
2489 if (PageDirty(page))
2490 set_buffer_dirty(bh);
2491 if (!bh->b_this_page)
2492 bh->b_this_page = head;
2493 bh = bh->b_this_page;
2494 } while (bh != head);
2495 attach_page_buffers(page, head);
2496 spin_unlock(&page->mapping->private_lock);
2500 * On entry, the page is fully not uptodate.
2501 * On exit the page is fully uptodate in the areas outside (from,to)
2503 int nobh_write_begin(struct file *file, struct address_space *mapping,
2504 loff_t pos, unsigned len, unsigned flags,
2505 struct page **pagep, void **fsdata,
2506 get_block_t *get_block)
2508 struct inode *inode = mapping->host;
2509 const unsigned blkbits = inode->i_blkbits;
2510 const unsigned blocksize = 1 << blkbits;
2511 struct buffer_head *head, *bh;
2515 unsigned block_in_page;
2516 unsigned block_start, block_end;
2517 sector_t block_in_file;
2520 int is_mapped_to_disk = 1;
2522 index = pos >> PAGE_CACHE_SHIFT;
2523 from = pos & (PAGE_CACHE_SIZE - 1);
2526 page = grab_cache_page_write_begin(mapping, index, flags);
2532 if (page_has_buffers(page)) {
2534 page_cache_release(page);
2536 return block_write_begin(file, mapping, pos, len, flags, pagep,
2540 if (PageMappedToDisk(page))
2544 * Allocate buffers so that we can keep track of state, and potentially
2545 * attach them to the page if an error occurs. In the common case of
2546 * no error, they will just be freed again without ever being attached
2547 * to the page (which is all OK, because we're under the page lock).
2549 * Be careful: the buffer linked list is a NULL terminated one, rather
2550 * than the circular one we're used to.
2552 head = alloc_page_buffers(page, blocksize, 0);
2558 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2561 * We loop across all blocks in the page, whether or not they are
2562 * part of the affected region. This is so we can discover if the
2563 * page is fully mapped-to-disk.
2565 for (block_start = 0, block_in_page = 0, bh = head;
2566 block_start < PAGE_CACHE_SIZE;
2567 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2570 block_end = block_start + blocksize;
2573 if (block_start >= to)
2575 ret = get_block(inode, block_in_file + block_in_page,
2579 if (!buffer_mapped(bh))
2580 is_mapped_to_disk = 0;
2582 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2583 if (PageUptodate(page)) {
2584 set_buffer_uptodate(bh);
2587 if (buffer_new(bh) || !buffer_mapped(bh)) {
2588 zero_user_segments(page, block_start, from,
2592 if (buffer_uptodate(bh))
2593 continue; /* reiserfs does this */
2594 if (block_start < from || block_end > to) {
2596 bh->b_end_io = end_buffer_read_nobh;
2597 submit_bh(READ, bh);
2604 * The page is locked, so these buffers are protected from
2605 * any VM or truncate activity. Hence we don't need to care
2606 * for the buffer_head refcounts.
2608 for (bh = head; bh; bh = bh->b_this_page) {
2611 err = wait_on_buffer_async(bh, current->io_wait);
2613 WARN(1, "%s: err=%d\n", __func__, err);
2616 if (!buffer_uptodate(bh))
2623 if (is_mapped_to_disk)
2624 SetPageMappedToDisk(page);
2626 *fsdata = head; /* to be released by nobh_write_end */
2633 * Error recovery is a bit difficult. We need to zero out blocks that
2634 * were newly allocated, and dirty them to ensure they get written out.
2635 * Buffers need to be attached to the page at this point, otherwise
2636 * the handling of potential IO errors during writeout would be hard
2637 * (could try doing synchronous writeout, but what if that fails too?)
2639 attach_nobh_buffers(page, head);
2640 page_zero_new_buffers(page, from, to);
2644 page_cache_release(page);
2647 if (pos + len > inode->i_size)
2648 vmtruncate(inode, inode->i_size);
2652 EXPORT_SYMBOL(nobh_write_begin);
2654 int nobh_write_end(struct file *file, struct address_space *mapping,
2655 loff_t pos, unsigned len, unsigned copied,
2656 struct page *page, void *fsdata)
2658 struct inode *inode = page->mapping->host;
2659 struct buffer_head *head = fsdata;
2660 struct buffer_head *bh;
2661 BUG_ON(fsdata != NULL && page_has_buffers(page));
2663 if (unlikely(copied < len) && head)
2664 attach_nobh_buffers(page, head);
2665 if (page_has_buffers(page))
2666 return generic_write_end(file, mapping, pos, len,
2667 copied, page, fsdata);
2669 SetPageUptodate(page);
2670 set_page_dirty(page);
2671 if (pos+copied > inode->i_size) {
2672 i_size_write(inode, pos+copied);
2673 mark_inode_dirty(inode);
2677 page_cache_release(page);
2681 head = head->b_this_page;
2682 free_buffer_head(bh);
2687 EXPORT_SYMBOL(nobh_write_end);
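/*
 * Illustrative sketch (not part of this file): a filesystem opting out of
 * long-lived buffer_heads (e.g. via a "nobh" mount option) would pair the
 * nobh helpers in its address_space_operations. foo_readpage and
 * foo_nobh_writepage are assumed names, with foo_nobh_writepage assumed to
 * wrap nobh_writepage() below:
 *
 *	static int foo_nobh_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos, unsigned len,
 *			unsigned flags, struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, foo_get_block);
 *	}
 *
 *	static const struct address_space_operations foo_nobh_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_nobh_writepage,
 *		.write_begin	= foo_nobh_write_begin,
 *		.write_end	= nobh_write_end,
 *	};
 */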
2690 * nobh_writepage() - based on block_write_full_page() except
2691 * that it tries to operate without attaching bufferheads to the page.
2694 int nobh_writepage(struct page *page, get_block_t *get_block,
2695 struct writeback_control *wbc)
2697 struct inode * const inode = page->mapping->host;
2698 loff_t i_size = i_size_read(inode);
2699 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2703 /* Is the page fully inside i_size? */
2704 if (page->index < end_index)
2707 /* Is the page fully outside i_size? (truncate in progress) */
2708 offset = i_size & (PAGE_CACHE_SIZE-1);
2709 if (page->index >= end_index+1 || !offset) {
2711 * The page may have dirty, unmapped buffers. For example,
2712 * they may have been added in ext3_writepage(). Make them
2713 * freeable here, so the page does not leak.
2716 /* Not really sure about this - do we need this ? */
2717 if (page->mapping->a_ops->invalidatepage)
2718 page->mapping->a_ops->invalidatepage(page, offset);
2721 return 0; /* don't care */
2725 * The page straddles i_size. It must be zeroed out on each and every
2726 * writepage invocation because it may be mmapped. "A file is mapped
2727 * in multiples of the page size. For a file that is not a multiple of
2728 * the page size, the remaining memory is zeroed when mapped, and
2729 * writes to that region are not written out to the file."
2731 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2733 ret = mpage_writepage(page, get_block, wbc);
2735 ret = __block_write_full_page(inode, page, get_block, wbc,
2736 end_buffer_async_write);
2739 EXPORT_SYMBOL(nobh_writepage);
2741 int nobh_truncate_page(struct address_space *mapping,
2742 loff_t from, get_block_t *get_block)
2744 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2745 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2748 unsigned length, pos;
2749 struct inode *inode = mapping->host;
2751 struct buffer_head map_bh;
2754 blocksize = 1 << inode->i_blkbits;
2755 length = offset & (blocksize - 1);
2757 /* Block boundary? Nothing to do */
2761 length = blocksize - length;
2762 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2764 page = grab_cache_page(mapping, index);
2769 if (page_has_buffers(page)) {
2772 page_cache_release(page);
2773 return block_truncate_page(mapping, from, get_block);
2776 /* Find the buffer that contains "offset" */
2778 while (offset >= pos) {
2783 map_bh.b_size = blocksize;
2785 err = get_block(inode, iblock, &map_bh, 0);
2788 /* unmapped? It's a hole - nothing to do */
2789 if (!buffer_mapped(&map_bh))
2792 /* Ok, it's mapped. Make sure it's up-to-date */
2793 if (!PageUptodate(page)) {
2794 err = mapping->a_ops->readpage(NULL, page);
2796 page_cache_release(page);
2800 if (!PageUptodate(page)) {
2804 if (page_has_buffers(page))
2807 zero_user(page, offset, length);
2808 set_page_dirty(page);
2813 page_cache_release(page);
2817 EXPORT_SYMBOL(nobh_truncate_page);
2819 int block_truncate_page(struct address_space *mapping,
2820 loff_t from, get_block_t *get_block)
2822 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2823 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2826 unsigned length, pos;
2827 struct inode *inode = mapping->host;
2829 struct buffer_head *bh;
2832 blocksize = 1 << inode->i_blkbits;
2833 length = offset & (blocksize - 1);
2835 /* Block boundary? Nothing to do */
2839 length = blocksize - length;
2840 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2842 page = grab_cache_page(mapping, index);
2847 if (!page_has_buffers(page))
2848 create_empty_buffers(page, blocksize, 0);
2850 /* Find the buffer that contains "offset" */
2851 bh = page_buffers(page);
2853 while (offset >= pos) {
2854 bh = bh->b_this_page;
2860 if (!buffer_mapped(bh)) {
2861 WARN_ON(bh->b_size != blocksize);
2862 err = get_block(inode, iblock, bh, 0);
2865 /* unmapped? It's a hole - nothing to do */
2866 if (!buffer_mapped(bh))
2870 /* Ok, it's mapped. Make sure it's up-to-date */
2871 if (PageUptodate(page))
2872 set_buffer_uptodate(bh);
2874 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2875 ll_rw_block(READ, 1, &bh);
2876 err = wait_on_buffer_async(bh, current->io_wait);
2878 WARN(1, "err=%d\n", err);
2881 /* Uhhuh. Read error. Complain and punt. */
2883 if (!buffer_uptodate(bh))
2887 zero_user(page, offset, length);
2888 mark_buffer_dirty(bh);
2893 page_cache_release(page);
2897 EXPORT_SYMBOL(block_truncate_page);
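/*
 * Illustrative sketch (not part of this file): a shrinking truncate
 * typically zeroes the tail of the new last block with this helper before
 * it frees the on-disk blocks beyond i_size, so a later expansion does not
 * expose stale data. foo_get_block and foo_free_blocks are assumed names:
 *
 *	static void foo_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    foo_get_block);
 *		foo_free_blocks(inode, inode->i_size);
 *	}
 */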
2900 * The generic ->writepage function for buffer-backed address_spaces;
2901 * this form passes in the end_io handler used to finish the IO.
2903 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2904 struct writeback_control *wbc, bh_end_io_t *handler)
2906 struct inode * const inode = page->mapping->host;
2907 loff_t i_size = i_size_read(inode);
2908 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2911 /* Is the page fully inside i_size? */
2912 if (page->index < end_index)
2913 return __block_write_full_page(inode, page, get_block, wbc,
2916 /* Is the page fully outside i_size? (truncate in progress) */
2917 offset = i_size & (PAGE_CACHE_SIZE-1);
2918 if (page->index >= end_index+1 || !offset) {
2920 * The page may have dirty, unmapped buffers. For example,
2921 * they may have been added in ext3_writepage(). Make them
2922 * freeable here, so the page does not leak.
2924 do_invalidatepage(page, 0);
2926 return 0; /* don't care */
2930 * The page straddles i_size. It must be zeroed out on each and every
2931 * writepage invocation because it may be mmapped. "A file is mapped
2932 * in multiples of the page size. For a file that is not a multiple of
2933 * the page size, the remaining memory is zeroed when mapped, and
2934 * writes to that region are not written out to the file."
2936 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2937 return __block_write_full_page(inode, page, get_block, wbc, handler);
2939 EXPORT_SYMBOL(block_write_full_page_endio);
2942 * The generic ->writepage function for buffer-backed address_spaces
2944 int block_write_full_page(struct page *page, get_block_t *get_block,
2945 struct writeback_control *wbc)
2947 return block_write_full_page_endio(page, get_block, wbc,
2948 end_buffer_async_write);
2950 EXPORT_SYMBOL(block_write_full_page);
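/*
 * Illustrative sketch (not part of this file): most buffer_head based
 * filesystems implement ->writepage as a direct call into this helper.
 * foo_get_block is an assumed name:
 *
 *	static int foo_writepage(struct page *page, struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 */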
2952 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2953 get_block_t *get_block)
2955 struct buffer_head tmp;
2956 struct inode *inode = mapping->host;
2959 tmp.b_size = 1 << inode->i_blkbits;
2960 get_block(inode, block, &tmp, 0);
2961 return tmp.b_blocknr;
2963 EXPORT_SYMBOL(generic_block_bmap);
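/*
 * Illustrative sketch (not part of this file): the FIBMAP ioctl reaches a
 * filesystem through ->bmap, which is usually just this helper plus the
 * filesystem's get_block. foo_get_block and foo_bmap are assumed names:
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 */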
2965 static void end_bio_bh_io_sync(struct bio *bio, int err)
2967 struct buffer_head *bh = bio->bi_private;
2969 if (err == -EOPNOTSUPP) {
2970 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2971 set_bit(BH_Eopnotsupp, &bh->b_state);
2974 if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2975 set_bit(BH_Quiet, &bh->b_state);
2977 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2981 int submit_bh(int rw, struct buffer_head * bh)
2986 BUG_ON(!buffer_locked(bh));
2987 BUG_ON(!buffer_mapped(bh));
2988 BUG_ON(!bh->b_end_io);
2989 BUG_ON(buffer_delay(bh));
2990 BUG_ON(buffer_unwritten(bh));
2993 * Mask in barrier bit for a write (could be either a WRITE or a WRITE_SYNC)
2996 if (buffer_ordered(bh) && (rw & WRITE))
2997 rw |= WRITE_BARRIER;
3000 * Only clear out a write error when rewriting
3002 if (test_set_buffer_req(bh) && (rw & WRITE))
3003 clear_buffer_write_io_error(bh);
3006 * from here on down, it's all bio -- do the initial mapping,
3007 * submit_bio -> generic_make_request may further map this bio around
3009 bio = bio_alloc(GFP_NOIO, 1);
3011 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3012 bio->bi_bdev = bh->b_bdev;
3013 bio->bi_io_vec[0].bv_page = bh->b_page;
3014 bio->bi_io_vec[0].bv_len = bh->b_size;
3015 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
3019 bio->bi_size = bh->b_size;
3021 bio->bi_end_io = end_bio_bh_io_sync;
3022 bio->bi_private = bh;
3025 submit_bio(rw, bio);
3027 if (bio_flagged(bio, BIO_EOPNOTSUPP))
3033 EXPORT_SYMBOL(submit_bh);
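/*
 * Illustrative sketch (not part of this file): a caller that wants to read
 * one buffer synchronously locks it, points b_end_io at a completion
 * handler and waits; the extra get_bh() balances the put_bh() done by
 * end_buffer_read_sync(). bh is assumed to come from sb_getblk() or
 * similar:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			goto io_error;
 *	} else
 *		unlock_buffer(bh);
 */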
3036 * ll_rw_block: low-level access to block devices (DEPRECATED)
3037 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
3038 * @nr: number of &struct buffer_heads in the array
3039 * @bhs: array of pointers to &struct buffer_head
3041 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3042 * requests an I/O operation on them, either a %READ or a %WRITE. The third
3043 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
3044 * are sent to disk. The fourth %READA option is described in the documentation
3045 * for generic_make_request() which ll_rw_block() calls.
3047 * This function drops any buffer that it cannot get a lock on (with the
3048 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3049 * clean when doing a write request, and any buffer that appears to be
3050 * up-to-date when doing read request. Further it marks as clean buffers that
3051 * are processed for writing (the buffer cache won't assume that they are
3052 * actually clean until the buffer gets unlocked).
3054 * ll_rw_block sets b_end_io to simple completion handler that marks
3055 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes up any waiters.
3058 * All of the buffers must be for the same device, and must also be a
3059 * multiple of the current approved size for the device.
3061 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3065 for (i = 0; i < nr; i++) {
3066 struct buffer_head *bh = bhs[i];
3068 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3070 else if (!trylock_buffer(bh))
3073 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3074 rw == SWRITE_SYNC_PLUG) {
3075 if (test_clear_buffer_dirty(bh)) {
3076 bh->b_end_io = end_buffer_write_sync;
3078 if (rw == SWRITE_SYNC)
3079 submit_bh(WRITE_SYNC, bh);
3081 submit_bh(WRITE, bh);
3085 if (!buffer_uptodate(bh)) {
3086 bh->b_end_io = end_buffer_read_sync;
3095 EXPORT_SYMBOL(ll_rw_block);
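/*
 * Illustrative sketch (not part of this file): a common pattern is to kick
 * off reads for several buffers at once and then wait only on the ones that
 * are actually needed; buffers that are already locked or uptodate are
 * simply skipped by ll_rw_block(). bh_a and bh_b are assumed names:
 *
 *	struct buffer_head *bhs[2] = { bh_a, bh_b };
 *
 *	ll_rw_block(READ, 2, bhs);
 *	wait_on_buffer(bh_a);
 *	if (!buffer_uptodate(bh_a))
 *		return -EIO;
 */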
3098 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3099 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer_head.
3102 int sync_dirty_buffer(struct buffer_head *bh)
3106 WARN_ON(atomic_read(&bh->b_count) < 1);
3108 if (test_clear_buffer_dirty(bh)) {
3110 bh->b_end_io = end_buffer_write_sync;
3111 ret = submit_bh(WRITE_SYNC, bh);
3113 if (buffer_eopnotsupp(bh)) {
3114 clear_buffer_eopnotsupp(bh);
3117 if (!ret && !buffer_uptodate(bh))
3124 EXPORT_SYMBOL(sync_dirty_buffer);
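/*
 * Illustrative sketch (not part of this file): after modifying a metadata
 * buffer in place, a filesystem that needs the change on stable storage
 * before proceeding would dirty the buffer and then force it out. offset,
 * src and count are assumed names:
 *
 *	memcpy(bh->b_data + offset, src, count);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		return err;
 */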
3127 * try_to_free_buffers() checks if all the buffers on this particular page
3128 * are unused, and releases them if so.
3130 * Exclusion against try_to_free_buffers may be obtained by either
3131 * locking the page or by holding its mapping's private_lock.
3133 * If the page is dirty but all the buffers are clean then we need to
3134 * be sure to mark the page clean as well. This is because the page
3135 * may be against a block device, and a later reattachment of buffers
3136 * to a dirty page will set *all* buffers dirty. Which would corrupt
3137 * filesystem data on the same device.
3139 * The same applies to regular filesystem pages: if all the buffers are
3140 * clean then we set the page clean and proceed. To do that, we require
3141 * total exclusion from __set_page_dirty_buffers(). That is obtained with private_lock.
3144 * try_to_free_buffers() is non-blocking.
3146 static inline int buffer_busy(struct buffer_head *bh)
3148 return atomic_read(&bh->b_count) |
3149 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3153 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3155 struct buffer_head *head = page_buffers(page);
3156 struct buffer_head *bh;
3160 if (buffer_write_io_error(bh) && page->mapping)
3161 set_bit(AS_EIO, &page->mapping->flags);
3162 if (buffer_busy(bh))
3164 bh = bh->b_this_page;
3165 } while (bh != head);
3168 struct buffer_head *next = bh->b_this_page;
3170 if (bh->b_assoc_map)
3171 __remove_assoc_queue(bh);
3173 } while (bh != head);
3174 *buffers_to_free = head;
3175 __clear_page_buffers(page);
3181 int try_to_free_buffers(struct page *page)
3183 struct address_space * const mapping = page->mapping;
3184 struct buffer_head *buffers_to_free = NULL;
3187 BUG_ON(!PageLocked(page));
3188 if (PageWriteback(page))
3191 if (mapping == NULL) { /* can this still happen? */
3192 ret = drop_buffers(page, &buffers_to_free);
3196 spin_lock(&mapping->private_lock);
3197 ret = drop_buffers(page, &buffers_to_free);
3200 * If the filesystem writes its buffers by hand (eg ext3)
3201 * then we can have clean buffers against a dirty page. We
3202 * clean the page here; otherwise the VM will never notice
3203 * that the filesystem did any IO at all.
3205 * Also, during truncate, discard_buffer will have marked all
3206 * the page's buffers clean. We discover that here and clean the page also.
3209 * private_lock must be held over this entire operation in order
3210 * to synchronise against __set_page_dirty_buffers and prevent the
3211 * dirty bit from being lost.
3214 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3215 spin_unlock(&mapping->private_lock);
3217 if (buffers_to_free) {
3218 struct buffer_head *bh = buffers_to_free;
3221 struct buffer_head *next = bh->b_this_page;
3222 free_buffer_head(bh);
3224 } while (bh != buffers_to_free);
3228 EXPORT_SYMBOL(try_to_free_buffers);
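/*
 * Illustrative sketch (not part of this file): filesystems without extra
 * per-page state typically expose this directly, or via a thin wrapper, as
 * their ->releasepage so the VM can strip buffers under memory pressure.
 * foo_releasepage is an assumed name:
 *
 *	static int foo_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */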
3230 void block_sync_page(struct page *page)
3232 struct address_space *mapping;
3235 mapping = page_mapping(page);
3237 blk_run_backing_dev(mapping->backing_dev_info, page);
3239 EXPORT_SYMBOL(block_sync_page);
3242 * There are no bdflush tunables left. But distributions are
3243 * still running obsolete flush daemons, so we terminate them here.
3245 * Use of bdflush() is deprecated and will be removed in a future kernel.
3246 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3248 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3250 static int msg_count;
3252 if (!capable(CAP_SYS_ADMIN))
3255 if (msg_count < 5) {
3258 "warning: process `%s' used the obsolete bdflush"
3259 " system call\n", current->comm);
3260 printk(KERN_INFO "Fix your initscripts?\n");
3269 * Buffer-head allocation
3271 static struct kmem_cache *bh_cachep;
3274 * Once the number of bh's in the machine exceeds this level, we start
3275 * stripping them in writeback.
3277 static int max_buffer_heads;
3279 int buffer_heads_over_limit;
3281 struct bh_accounting {
3282 int nr; /* Number of live bh's */
3283 int ratelimit; /* Limit cacheline bouncing */
3286 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3288 static void recalc_bh_state(void)
3293 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3295 __get_cpu_var(bh_accounting).ratelimit = 0;
3296 for_each_online_cpu(i)
3297 tot += per_cpu(bh_accounting, i).nr;
3298 buffer_heads_over_limit = (tot > max_buffer_heads);
3301 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3303 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3305 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3306 get_cpu_var(bh_accounting).nr++;
3308 put_cpu_var(bh_accounting);
3312 EXPORT_SYMBOL(alloc_buffer_head);
3314 void free_buffer_head(struct buffer_head *bh)
3316 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3317 kmem_cache_free(bh_cachep, bh);
3318 get_cpu_var(bh_accounting).nr--;
3320 put_cpu_var(bh_accounting);
3322 EXPORT_SYMBOL(free_buffer_head);
3324 static void buffer_exit_cpu(int cpu)
3327 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3329 for (i = 0; i < BH_LRU_SIZE; i++) {
3333 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3334 per_cpu(bh_accounting, cpu).nr = 0;
3335 put_cpu_var(bh_accounting);
3338 static int buffer_cpu_notify(struct notifier_block *self,
3339 unsigned long action, void *hcpu)
3341 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3342 buffer_exit_cpu((unsigned long)hcpu);
3347 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3348 * @bh: struct buffer_head
3350 * Return true if the buffer is up-to-date and false,
3351 * with the buffer locked, if not.
3353 int bh_uptodate_or_lock(struct buffer_head *bh)
3355 if (!buffer_uptodate(bh)) {
3357 if (!buffer_uptodate(bh))
3363 EXPORT_SYMBOL(bh_uptodate_or_lock);
3366 * bh_submit_read - Submit a locked buffer for reading
3367 * @bh: struct buffer_head
3369 * Returns zero on success and -EIO on error.
3371 int bh_submit_read(struct buffer_head *bh)
3373 BUG_ON(!buffer_locked(bh));
3375 if (buffer_uptodate(bh)) {
3381 bh->b_end_io = end_buffer_read_sync;
3382 submit_bh(READ, bh);
3384 if (buffer_uptodate(bh))
3388 EXPORT_SYMBOL(bh_submit_read);
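/*
 * Illustrative sketch (not part of this file): bh_uptodate_or_lock() and
 * bh_submit_read() combine into a compact "read this metadata block unless
 * it is already cached" pattern. sb and blocknr are assumed to be valid:
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *	if (unlikely(!bh))
 *		return -EIO;
 *	if (!bh_uptodate_or_lock(bh) && bh_submit_read(bh) < 0) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 */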
3391 init_buffer_head(void *data)
3393 struct buffer_head *bh = data;
3395 memset(bh, 0, sizeof(*bh));
3396 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3399 void __init buffer_init(void)
3403 bh_cachep = kmem_cache_create("buffer_head",
3404 sizeof(struct buffer_head), 0,
3405 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3410 * Limit the bh occupancy to 10% of ZONE_NORMAL
3412 nrpages = (nr_free_buffer_pages() * 10) / 100;
3413 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3414 hotcpu_notifier(buffer_cpu_notify, 0);