4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
55 EXPORT_SYMBOL(init_buffer);
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
72 void __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void unlock_buffer(struct buffer_head *bh)
81 clear_bit_unlock(BH_Lock, &bh->b_state);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
85 EXPORT_SYMBOL(unlock_buffer);
88 * Block until a buffer comes unlocked. This doesn't stop it
89 * from becoming locked again - you have to lock it yourself
90 * if you want to preserve its state.
92 void __wait_on_buffer(struct buffer_head * bh)
94 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
96 EXPORT_SYMBOL(__wait_on_buffer);
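/*
 * Illustrative sketch (hypothetical helper, not used by this file): as the
 * comment above notes, __wait_on_buffer() does not keep the buffer unlocked,
 * so a caller that needs a stable view of the buffer must take BH_Lock
 * itself rather than merely waiting for it to clear.
 */
static void example_inspect_buffer_stably(struct buffer_head *bh)
{
	lock_buffer(bh);	/* unlike wait_on_buffer(), this excludes other lockers */
	if (buffer_mapped(bh) && buffer_uptodate(bh)) {
		/* buffer state cannot change under us while BH_Lock is held */
	}
	unlock_buffer(bh);
}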
98 int __wait_on_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
100 return wait_on_bit_async(&bh->b_state, BH_Lock, sync_buffer,
101 TASK_UNINTERRUPTIBLE, wait);
103 EXPORT_SYMBOL(__wait_on_buffer_async);
106 __clear_page_buffers(struct page *page)
108 ClearPagePrivate(page);
109 set_page_private(page, 0);
110 page_cache_release(page);
114 static int quiet_error(struct buffer_head *bh)
116 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
122 static void buffer_io_error(struct buffer_head *bh)
124 char b[BDEVNAME_SIZE];
125 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
126 bdevname(bh->b_bdev, b),
127 (unsigned long long)bh->b_blocknr);
131 * End-of-IO handler helper function which does not touch the bh after unlocking it.
133 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
134 * a race there is benign: unlock_buffer() only uses the bh's address for
135 * hashing after unlocking the buffer, so it doesn't actually touch the bh itself.
138 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
141 set_buffer_uptodate(bh);
143 /* This happens, due to failed READA attempts. */
144 clear_buffer_uptodate(bh);
150 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
151 * unlock the buffer. This is what ll_rw_block uses too.
153 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
155 __end_buffer_read_notouch(bh, uptodate);
158 EXPORT_SYMBOL(end_buffer_read_sync);
160 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
162 char b[BDEVNAME_SIZE];
165 set_buffer_uptodate(bh);
167 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
169 printk(KERN_WARNING "lost page write due to "
171 bdevname(bh->b_bdev, b));
173 set_buffer_write_io_error(bh);
174 clear_buffer_uptodate(bh);
179 EXPORT_SYMBOL(end_buffer_write_sync);
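/*
 * Illustrative sketch: a synchronous write of one dirty buffer using the
 * default completion handler above. This mirrors what sync_dirty_buffer()
 * does; the helper name is hypothetical, and the extra reference taken with
 * get_bh() is dropped by end_buffer_write_sync() itself.
 */
static int example_write_buffer_and_wait(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}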
182 * Various filesystems appear to want __find_get_block to be non-blocking.
183 * But it's the page lock which protects the buffers. To get around this,
184 * we get exclusion from try_to_free_buffers with the blockdev mapping's
187 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
188 * may be quite high. This code could TryLock the page, and if that
189 * succeeds, there is no need to take private_lock. (But if
190 * private_lock is contended then so is mapping->tree_lock).
192 static struct buffer_head *
193 __find_get_block_slow(struct block_device *bdev, sector_t block)
195 struct inode *bd_inode = bdev->bd_inode;
196 struct address_space *bd_mapping = bd_inode->i_mapping;
197 struct buffer_head *ret = NULL;
199 struct buffer_head *bh;
200 struct buffer_head *head;
204 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
205 page = find_get_page(bd_mapping, index);
209 spin_lock(&bd_mapping->private_lock);
210 if (!page_has_buffers(page))
212 head = page_buffers(page);
215 if (!buffer_mapped(bh))
217 else if (bh->b_blocknr == block) {
222 bh = bh->b_this_page;
223 } while (bh != head);
225 /* we might be here because some of the buffers on this page are
226 * not mapped. This is due to various races between
227 * file io on the block device and getblk. It gets dealt with
228 * elsewhere, don't buffer_error if we had some unmapped buffers
231 printk("__find_get_block_slow() failed. "
232 "block=%llu, b_blocknr=%llu\n",
233 (unsigned long long)block,
234 (unsigned long long)bh->b_blocknr);
235 printk("b_state=0x%08lx, b_size=%zu\n",
236 bh->b_state, bh->b_size);
237 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
240 spin_unlock(&bd_mapping->private_lock);
241 page_cache_release(page);
246 /* If invalidate_buffers() will trash dirty buffers, it means some kind
247 of fs corruption is going on. Trashing dirty data always implies losing
248 information that was supposed to be just stored on the physical layer
251 Thus invalidate_buffers in general usage is not allowed to trash
252 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
253 be preserved. These buffers are simply skipped.
255 We also skip buffers which are still in use. For example this can
256 happen if a userspace program is reading the block device.
258 NOTE: if the user removes removable media while there is still dirty
259 data that has not been synced to disk (due to a bug in the device driver
260 or to a user error), then by not destroying those dirty buffers we could
261 also corrupt the next media inserted. A parameter is therefore
262 necessary to handle this case as safely as possible (trying not
263 to corrupt the newly inserted disk with data belonging to
264 the old, now corrupted, disk). For the ramdisk, on the other hand, the
265 natural way to release its memory is precisely to destroy the dirty buffers.
267 Those are two special cases. Normal usage implies that the device driver
268 issues a sync on the device (without waiting for I/O completion) and
269 then an invalidate_buffers call that doesn't trash dirty buffers.
271 For handling cache coherency with the blkdev pagecache the 'update' case
272 has been introduced. It is needed to re-read from disk any pinned
273 buffer. NOTE: re-reading from disk is destructive, so we can do it only
274 when we assume nobody is changing the buffer cache under our I/O and when
275 we think the disk contains more recent information than the buffer cache.
276 The update == 1 pass marks the buffers we need to update; the update == 2
277 pass does the actual I/O. */
278 void invalidate_bdev(struct block_device *bdev)
280 struct address_space *mapping = bdev->bd_inode->i_mapping;
282 if (mapping->nrpages == 0)
285 invalidate_bh_lrus();
286 invalidate_mapping_pages(mapping, 0, -1);
288 EXPORT_SYMBOL(invalidate_bdev);
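/*
 * Illustrative sketch (hypothetical helper): a common, non-destructive way
 * to drop a block device's cached data - write back its dirty pagecache
 * first, then call invalidate_bdev(), which skips dirty and in-use buffers.
 */
static void example_sync_then_invalidate(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* write back the bdev's dirty pagecache */
	invalidate_bdev(bdev);		/* drop clean, unpinned cached pages */
}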
291 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
293 static void free_more_memory(void)
298 wakeup_flusher_threads(1024);
301 for_each_online_node(nid) {
302 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
303 gfp_zone(GFP_NOFS), NULL,
306 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
312 * I/O completion handler for block_read_full_page() - pages
313 * which come unlocked at the end of I/O.
315 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
318 struct buffer_head *first;
319 struct buffer_head *tmp;
321 int page_uptodate = 1;
323 BUG_ON(!buffer_async_read(bh));
327 set_buffer_uptodate(bh);
329 clear_buffer_uptodate(bh);
330 if (!quiet_error(bh))
336 * Be _very_ careful from here on. Bad things can happen if
337 * two buffer heads end IO at almost the same time and both
338 * decide that the page is now completely done.
340 first = page_buffers(page);
341 local_irq_save(flags);
342 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
343 clear_buffer_async_read(bh);
347 if (!buffer_uptodate(tmp))
349 if (buffer_async_read(tmp)) {
350 BUG_ON(!buffer_locked(tmp));
353 tmp = tmp->b_this_page;
355 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
356 local_irq_restore(flags);
359 * If none of the buffers had errors and they are all
360 * uptodate then we can set the page uptodate.
362 if (page_uptodate && !PageError(page))
363 SetPageUptodate(page);
368 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
369 local_irq_restore(flags);
374 * Completion handler for block_write_full_page() - pages which are unlocked
375 * during I/O, and which have PageWriteback cleared upon I/O completion.
377 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
379 char b[BDEVNAME_SIZE];
381 struct buffer_head *first;
382 struct buffer_head *tmp;
385 BUG_ON(!buffer_async_write(bh));
389 set_buffer_uptodate(bh);
391 if (!quiet_error(bh)) {
393 printk(KERN_WARNING "lost page write due to "
395 bdevname(bh->b_bdev, b));
397 set_bit(AS_EIO, &page->mapping->flags);
398 set_buffer_write_io_error(bh);
399 clear_buffer_uptodate(bh);
403 first = page_buffers(page);
404 local_irq_save(flags);
405 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
407 clear_buffer_async_write(bh);
409 tmp = bh->b_this_page;
411 if (buffer_async_write(tmp)) {
412 BUG_ON(!buffer_locked(tmp));
415 tmp = tmp->b_this_page;
417 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
418 local_irq_restore(flags);
419 end_page_writeback(page);
423 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
424 local_irq_restore(flags);
427 EXPORT_SYMBOL(end_buffer_async_write);
430 * If a page's buffers are under async read (end_buffer_async_read
431 * completion) then there is a possibility that another thread of
432 * control could lock one of the buffers after it has completed
433 * but while some of the other buffers have not completed. This
434 * locked buffer would confuse end_buffer_async_read() into not unlocking
435 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
436 * that this buffer is not under async I/O.
438 * The page comes unlocked when it has no locked buffer_async buffers
441 * PageLocked prevents anyone from starting new async I/O against any of
442 * the buffers.
444 * PageWriteback is used to prevent simultaneous writeout of the same
445 * page.
447 * PageLocked prevents anyone from starting writeback of a page which is
448 * under read I/O (PageWriteback is only ever set against a locked page).
450 static void mark_buffer_async_read(struct buffer_head *bh)
452 bh->b_end_io = end_buffer_async_read;
453 set_buffer_async_read(bh);
456 static void mark_buffer_async_write_endio(struct buffer_head *bh,
457 bh_end_io_t *handler)
459 bh->b_end_io = handler;
460 set_buffer_async_write(bh);
463 void mark_buffer_async_write(struct buffer_head *bh)
465 mark_buffer_async_write_endio(bh, end_buffer_async_write);
467 EXPORT_SYMBOL(mark_buffer_async_write);
471 * fs/buffer.c contains helper functions for buffer-backed address space's
472 * fsync functions. A common requirement for buffer-based filesystems is
473 * that certain data from the backing blockdev needs to be written out for
474 * a successful fsync(). For example, ext2 indirect blocks need to be
475 * written back and waited upon before fsync() returns.
477 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
478 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
479 * management of a list of dependent buffers at ->i_mapping->private_list.
481 * Locking is a little subtle: try_to_free_buffers() will remove buffers
482 * from their controlling inode's queue when they are being freed. But
483 * try_to_free_buffers() will be operating against the *blockdev* mapping
484 * at the time, not against the S_ISREG file which depends on those buffers.
485 * So the locking for private_list is via the private_lock in the address_space
486 * which backs the buffers. Which is different from the address_space
487 * against which the buffers are listed. So for a particular address_space,
488 * mapping->private_lock does *not* protect mapping->private_list! In fact,
489 * mapping->private_list will always be protected by the backing blockdev's
490 * ->private_lock.
492 * Which introduces a requirement: all buffers on an address_space's
493 * ->private_list must be from the same address_space: the blockdev's.
495 * address_spaces which do not place buffers at ->private_list via these
496 * utility functions are free to use private_lock and private_list for
497 * whatever they want. The only requirement is that list_empty(private_list)
498 * be true at clear_inode() time.
500 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
501 * filesystems should do that. invalidate_inode_buffers() should just go
502 * BUG_ON(!list_empty).
504 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
505 * take an address_space, not an inode. And it should be called
506 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
509 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
510 * list if it is already on a list. Because if the buffer is on a list,
511 * it *must* already be on the right one. If not, the filesystem is being
512 * silly. This will save a ton of locking. But first we have to ensure
513 * that buffers are taken *off* the old inode's list when they are freed
514 * (presumably in truncate). That requires careful auditing of all
515 * filesystems (do it inside bforget()). It could also be done by bringing
516 * b_inode back.
520 * The buffer's backing address_space's private_lock must be held
522 static void __remove_assoc_queue(struct buffer_head *bh)
524 list_del_init(&bh->b_assoc_buffers);
525 WARN_ON(!bh->b_assoc_map);
526 if (buffer_write_io_error(bh))
527 set_bit(AS_EIO, &bh->b_assoc_map->flags);
528 bh->b_assoc_map = NULL;
531 int inode_has_buffers(struct inode *inode)
533 return !list_empty(&inode->i_data.private_list);
537 * osync is designed to support O_SYNC io. It waits synchronously for
538 * all already-submitted IO to complete, but does not queue any new
539 * writes to the disk.
541 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
542 * you dirty the buffers, and then use osync_inode_buffers to wait for
543 * completion. Any other dirty buffers which are not yet queued for
544 * write will not be flushed to disk by the osync.
546 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
548 struct buffer_head *bh;
554 list_for_each_prev(p, list) {
556 if (buffer_locked(bh)) {
560 if (!buffer_uptodate(bh))
571 static void do_thaw_all(struct work_struct *work)
573 struct super_block *sb;
574 char b[BDEVNAME_SIZE];
578 list_for_each_entry(sb, &super_blocks, s_list) {
580 spin_unlock(&sb_lock);
581 down_read(&sb->s_umount);
582 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
583 printk(KERN_WARNING "Emergency Thaw on %s\n",
584 bdevname(sb->s_bdev, b));
585 up_read(&sb->s_umount);
587 if (__put_super_and_need_restart(sb))
590 spin_unlock(&sb_lock);
592 printk(KERN_WARNING "Emergency Thaw complete\n");
596 * emergency_thaw_all -- forcibly thaw every frozen filesystem
598 * Used for emergency unfreeze of all filesystems via SysRq
600 void emergency_thaw_all(void)
602 struct work_struct *work;
604 work = kmalloc(sizeof(*work), GFP_ATOMIC);
606 INIT_WORK(work, do_thaw_all);
612 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
613 * @mapping: the mapping which wants those buffers written
615 * Starts I/O against the buffers at mapping->private_list, and waits upon
618 * Basically, this is a convenience function for fsync().
619 * @mapping is a file or directory which needs those buffers to be written for
620 * a successful fsync().
622 int sync_mapping_buffers(struct address_space *mapping)
624 struct address_space *buffer_mapping = mapping->assoc_mapping;
626 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
629 return fsync_buffers_list(&buffer_mapping->private_lock,
630 &mapping->private_list);
632 EXPORT_SYMBOL(sync_mapping_buffers);
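/*
 * Illustrative sketch, assuming a filesystem (the helper name is
 * hypothetical) that files metadata buffers on the inode's private_list via
 * mark_buffer_dirty_inode(): its fsync path only needs to write out and
 * wait upon that associated-buffer list.
 */
static int example_fsync_assoc_buffers(struct inode *inode)
{
	int err = 0;

	if (inode_has_buffers(inode))
		err = sync_mapping_buffers(inode->i_mapping);
	return err;
}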
635 * Called when we've recently written block `bblock', and it is known that
636 * `bblock' was for a buffer_boundary() buffer. This means that the block at
637 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
638 * dirty, schedule it for IO. So that indirects merge nicely with their data.
640 void write_boundary_block(struct block_device *bdev,
641 sector_t bblock, unsigned blocksize)
643 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
645 if (buffer_dirty(bh))
646 ll_rw_block(WRITE, 1, &bh);
651 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
653 struct address_space *mapping = inode->i_mapping;
654 struct address_space *buffer_mapping = bh->b_page->mapping;
656 mark_buffer_dirty(bh);
657 if (!mapping->assoc_mapping) {
658 mapping->assoc_mapping = buffer_mapping;
660 BUG_ON(mapping->assoc_mapping != buffer_mapping);
662 if (!bh->b_assoc_map) {
663 spin_lock(&buffer_mapping->private_lock);
664 list_move_tail(&bh->b_assoc_buffers,
665 &mapping->private_list);
666 bh->b_assoc_map = mapping;
667 spin_unlock(&buffer_mapping->private_lock);
670 EXPORT_SYMBOL(mark_buffer_dirty_inode);
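/*
 * Illustrative sketch (hypothetical caller): an ext2-style filesystem that
 * has just modified an indirect block files it against the owning inode, so
 * that a later sync_mapping_buffers() from fsync() picks it up.
 */
static void example_dirty_indirect_block(struct inode *inode, sector_t block)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, block);

	if (!bh)
		return;
	/* ... modify the indirect block's contents via bh->b_data ... */
	mark_buffer_dirty_inode(bh, inode);	/* dirty + queue on ->private_list */
	brelse(bh);
}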
673 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
676 * If warn is true, then emit a warning if the page is not uptodate and has
677 * not been truncated.
679 static void __set_page_dirty(struct page *page,
680 struct address_space *mapping, int warn)
682 spin_lock_irq(&mapping->tree_lock);
683 if (page->mapping) { /* Race with truncate? */
684 WARN_ON_ONCE(warn && !PageUptodate(page));
685 account_page_dirtied(page, mapping);
686 radix_tree_tag_set(&mapping->page_tree,
687 page_index(page), PAGECACHE_TAG_DIRTY);
689 spin_unlock_irq(&mapping->tree_lock);
690 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
694 * Add a page to the dirty page list.
696 * It is a sad fact of life that this function is called from several places
697 * deeply under spinlocking. It may not sleep.
699 * If the page has buffers, the uptodate buffers are set dirty, to preserve
700 * dirty-state coherency between the page and the buffers. If the page does
701 * not have buffers then when they are later attached they will all be set
704 * The buffers are dirtied before the page is dirtied. There's a small race
705 * window in which a writepage caller may see the page cleanness but not the
706 * buffer dirtiness. That's fine. If this code were to set the page dirty
707 * before the buffers, a concurrent writepage caller could clear the page dirty
708 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
709 * page on the dirty page list.
711 * We use private_lock to lock against try_to_free_buffers while using the
712 * page's buffer list. Also use this to protect against clean buffers being
713 * added to the page after it was set dirty.
715 * FIXME: may need to call ->reservepage here as well. That's rather up to the
716 * address_space though.
718 int __set_page_dirty_buffers(struct page *page)
721 struct address_space *mapping = page_mapping(page);
723 if (unlikely(!mapping))
724 return !TestSetPageDirty(page);
726 spin_lock(&mapping->private_lock);
727 if (page_has_buffers(page)) {
728 struct buffer_head *head = page_buffers(page);
729 struct buffer_head *bh = head;
732 set_buffer_dirty(bh);
733 bh = bh->b_this_page;
734 } while (bh != head);
736 newly_dirty = !TestSetPageDirty(page);
737 spin_unlock(&mapping->private_lock);
740 __set_page_dirty(page, mapping, 1);
743 EXPORT_SYMBOL(__set_page_dirty_buffers);
746 * Write out and wait upon a list of buffers.
748 * We have conflicting pressures: we want to make sure that all
749 * initially dirty buffers get waited on, but that any subsequently
750 * dirtied buffers don't. After all, we don't want fsync to last
751 * forever if somebody is actively writing to the file.
753 * Do this in two main stages: first we copy dirty buffers to a
754 * temporary inode list, queueing the writes as we go. Then we clean
755 * up, waiting for those writes to complete.
757 * During this second stage, any subsequent updates to the file may end
758 * up refiling the buffer on the original inode's dirty list again, so
759 * there is a chance we will end up with a buffer queued for write but
760 * not yet completed on that list. So, as a final cleanup we go through
761 * the osync code to catch these locked, dirty buffers without requeuing
762 * any newly dirty buffers for write.
764 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
766 struct buffer_head *bh;
767 struct list_head tmp;
768 struct address_space *mapping, *prev_mapping = NULL;
771 INIT_LIST_HEAD(&tmp);
774 while (!list_empty(list)) {
775 bh = BH_ENTRY(list->next);
776 mapping = bh->b_assoc_map;
777 __remove_assoc_queue(bh);
778 /* Avoid race with mark_buffer_dirty_inode() which does
779 * a lockless check and we rely on seeing the dirty bit */
781 if (buffer_dirty(bh) || buffer_locked(bh)) {
782 list_add(&bh->b_assoc_buffers, &tmp);
783 bh->b_assoc_map = mapping;
784 if (buffer_dirty(bh)) {
788 * Ensure any pending I/O completes so that
789 * ll_rw_block() actually writes the current
790 * contents - it is a noop if I/O is still in
791 * flight on potentially older contents.
793 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
796 * Kick off IO for the previous mapping. Note
797 * that we will not run the very last mapping,
798 * wait_on_buffer() will do that for us
799 * through sync_buffer().
801 if (prev_mapping && prev_mapping != mapping)
802 blk_run_address_space(prev_mapping);
803 prev_mapping = mapping;
811 while (!list_empty(&tmp)) {
812 bh = BH_ENTRY(tmp.prev);
814 mapping = bh->b_assoc_map;
815 __remove_assoc_queue(bh);
816 /* Avoid race with mark_buffer_dirty_inode() which does
817 * a lockless check and we rely on seeing the dirty bit */
819 if (buffer_dirty(bh)) {
820 list_add(&bh->b_assoc_buffers,
821 &mapping->private_list);
822 bh->b_assoc_map = mapping;
826 if (!buffer_uptodate(bh))
833 err2 = osync_buffers_list(lock, list);
841 * Invalidate any and all dirty buffers on a given inode. We are
842 * probably unmounting the fs, but that doesn't mean we have already
843 * done a sync(). Just drop the buffers from the inode list.
845 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
846 * assumes that all the buffers are against the blockdev. Not true
849 void invalidate_inode_buffers(struct inode *inode)
851 if (inode_has_buffers(inode)) {
852 struct address_space *mapping = &inode->i_data;
853 struct list_head *list = &mapping->private_list;
854 struct address_space *buffer_mapping = mapping->assoc_mapping;
856 spin_lock(&buffer_mapping->private_lock);
857 while (!list_empty(list))
858 __remove_assoc_queue(BH_ENTRY(list->next));
859 spin_unlock(&buffer_mapping->private_lock);
862 EXPORT_SYMBOL(invalidate_inode_buffers);
865 * Remove any clean buffers from the inode's buffer list. This is called
866 * when we're trying to free the inode itself. Those buffers can pin it.
868 * Returns true if all buffers were removed.
870 int remove_inode_buffers(struct inode *inode)
874 if (inode_has_buffers(inode)) {
875 struct address_space *mapping = &inode->i_data;
876 struct list_head *list = &mapping->private_list;
877 struct address_space *buffer_mapping = mapping->assoc_mapping;
879 spin_lock(&buffer_mapping->private_lock);
880 while (!list_empty(list)) {
881 struct buffer_head *bh = BH_ENTRY(list->next);
882 if (buffer_dirty(bh)) {
886 __remove_assoc_queue(bh);
888 spin_unlock(&buffer_mapping->private_lock);
894 * Create the appropriate buffers when given a page for data area and
895 * the size of each buffer.. Use the bh->b_this_page linked list to
896 * follow the buffers created. Return NULL if unable to create more
899 * The retry flag is used to differentiate async IO (paging, swapping)
900 * which may not fail from ordinary buffer allocations.
902 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
905 struct buffer_head *bh, *head;
911 while ((offset -= size) >= 0) {
912 bh = alloc_buffer_head(GFP_NOFS);
917 bh->b_this_page = head;
922 atomic_set(&bh->b_count, 0);
923 bh->b_private = NULL;
926 /* Link the buffer to its page */
927 set_bh_page(bh, page, offset);
929 init_buffer(bh, NULL, NULL);
933 * In case anything failed, we just free everything we got.
939 head = head->b_this_page;
940 free_buffer_head(bh);
945 * Return failure for non-async IO requests. Async IO requests
946 * are not allowed to fail, so we have to wait until buffer heads
947 * become available. But we don't want tasks sleeping with
948 * partially complete buffers, so all were released above.
953 /* We're _really_ low on memory. Now we just
954 * wait for old buffer heads to become free due to
955 * finishing IO. Since this is an async request and
956 * the reserve list is empty, we're sure there are
957 * async buffer heads in use.
962 EXPORT_SYMBOL_GPL(alloc_page_buffers);
965 link_dev_buffers(struct page *page, struct buffer_head *head)
967 struct buffer_head *bh, *tail;
972 bh = bh->b_this_page;
974 tail->b_this_page = head;
975 attach_page_buffers(page, head);
979 * Initialise the state of a blockdev page's buffers.
982 init_page_buffers(struct page *page, struct block_device *bdev,
983 sector_t block, int size)
985 struct buffer_head *head = page_buffers(page);
986 struct buffer_head *bh = head;
987 int uptodate = PageUptodate(page);
990 if (!buffer_mapped(bh)) {
991 init_buffer(bh, NULL, NULL);
993 bh->b_blocknr = block;
995 set_buffer_uptodate(bh);
996 set_buffer_mapped(bh);
999 bh = bh->b_this_page;
1000 } while (bh != head);
1004 * Create the page-cache page that contains the requested block.
1006 * This is used purely for blockdev mappings.
1008 static struct page *
1009 grow_dev_page(struct block_device *bdev, sector_t block,
1010 pgoff_t index, int size)
1012 struct inode *inode = bdev->bd_inode;
1014 struct buffer_head *bh;
1016 page = find_or_create_page(inode->i_mapping, index,
1017 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1021 BUG_ON(!PageLocked(page));
1023 if (page_has_buffers(page)) {
1024 bh = page_buffers(page);
1025 if (bh->b_size == size) {
1026 init_page_buffers(page, bdev, block, size);
1029 if (!try_to_free_buffers(page))
1034 * Allocate some buffers for this page
1036 bh = alloc_page_buffers(page, size, 0);
1041 * Link the page to the buffers and initialise them. Take the
1042 * lock to be atomic wrt __find_get_block(), which does not
1043 * run under the page lock.
1045 spin_lock(&inode->i_mapping->private_lock);
1046 link_dev_buffers(page, bh);
1047 init_page_buffers(page, bdev, block, size);
1048 spin_unlock(&inode->i_mapping->private_lock);
1054 page_cache_release(page);
1059 * Create buffers for the specified block device block's page. If
1060 * that page was dirty, the buffers are set dirty also.
1063 grow_buffers(struct block_device *bdev, sector_t block, int size)
1072 } while ((size << sizebits) < PAGE_SIZE);
1074 index = block >> sizebits;
1077 * Check for a block which wants to lie outside our maximum possible
1078 * pagecache index. (this comparison is done using sector_t types).
1080 if (unlikely(index != block >> sizebits)) {
1081 char b[BDEVNAME_SIZE];
1083 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1085 __func__, (unsigned long long)block,
1089 block = index << sizebits;
1090 /* Create a page with the proper size buffers.. */
1091 page = grow_dev_page(bdev, block, index, size);
1095 page_cache_release(page);
1099 static struct buffer_head *
1100 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1102 /* Size must be a multiple of the hard sector size */
1103 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1104 (size < 512 || size > PAGE_SIZE))) {
1105 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1107 printk(KERN_ERR "logical block size: %d\n",
1108 bdev_logical_block_size(bdev));
1115 struct buffer_head * bh;
1118 bh = __find_get_block(bdev, block, size);
1122 ret = grow_buffers(bdev, block, size);
1131 * The relationship between dirty buffers and dirty pages:
1133 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1134 * the page is tagged dirty in its radix tree.
1136 * At all times, the dirtiness of the buffers represents the dirtiness of
1137 * subsections of the page. If the page has buffers, the page dirty bit is
1138 * merely a hint about the true dirty state.
1140 * When a page is set dirty in its entirety, all its buffers are marked dirty
1141 * (if the page has buffers).
1143 * When a buffer is marked dirty, its page is dirtied, but the page's other
1146 * Also. When blockdev buffers are explicitly read with bread(), they
1147 * individually become uptodate. But their backing page remains not
1148 * uptodate - even if all of its buffers are uptodate. A subsequent
1149 * block_read_full_page() against that page will discover all the uptodate
1150 * buffers, will set the page uptodate and will perform no I/O.
1154 * mark_buffer_dirty - mark a buffer_head as needing writeout
1155 * @bh: the buffer_head to mark dirty
1157 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1158 * backing page dirty, then tag the page as dirty in its address_space's radix
1159 * tree and then attach the address_space's inode to its superblock's dirty
1162 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1163 * mapping->tree_lock and the global inode_lock.
1165 void mark_buffer_dirty(struct buffer_head *bh)
1167 WARN_ON_ONCE(!buffer_uptodate(bh));
1170 * Very *carefully* optimize the it-is-already-dirty case.
1172 * Don't let the final "is it dirty" escape to before we
1173 * perhaps modified the buffer.
1175 if (buffer_dirty(bh)) {
1177 if (buffer_dirty(bh))
1181 if (!test_set_buffer_dirty(bh)) {
1182 struct page *page = bh->b_page;
1183 if (!TestSetPageDirty(page)) {
1184 struct address_space *mapping = page_mapping(page);
1186 __set_page_dirty(page, mapping, 0);
1190 EXPORT_SYMBOL(mark_buffer_dirty);
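/*
 * Illustrative sketch (hypothetical helper): read a block, modify it and
 * mark it dirty; periodic writeback will push it out, or the caller can
 * force the issue with sync_dirty_buffer().
 */
static int example_modify_block(struct block_device *bdev, sector_t block,
				unsigned size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;
	/* ... update bh->b_data here ... */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);	/* optional: write it out and wait now */
	brelse(bh);
	return 0;
}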
1193 * Decrement a buffer_head's reference count. If all buffers against a page
1194 * have zero reference count, are clean and unlocked, and if the page is clean
1195 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1196 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1197 * a page but it ends up not being freed, and buffers may later be reattached).
1199 void __brelse(struct buffer_head * buf)
1201 if (atomic_read(&buf->b_count)) {
1205 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1207 EXPORT_SYMBOL(__brelse);
1210 * bforget() is like brelse(), except it discards any
1211 * potentially dirty data.
1213 void __bforget(struct buffer_head *bh)
1215 clear_buffer_dirty(bh);
1216 if (bh->b_assoc_map) {
1217 struct address_space *buffer_mapping = bh->b_page->mapping;
1219 spin_lock(&buffer_mapping->private_lock);
1220 list_del_init(&bh->b_assoc_buffers);
1221 bh->b_assoc_map = NULL;
1222 spin_unlock(&buffer_mapping->private_lock);
1226 EXPORT_SYMBOL(__bforget);
1228 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1231 if (buffer_uptodate(bh)) {
1236 bh->b_end_io = end_buffer_read_sync;
1237 submit_bh(READ, bh);
1239 if (buffer_uptodate(bh))
1247 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1248 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1249 * refcount elevated by one when they're in an LRU. A buffer can only appear
1250 * once in a particular CPU's LRU. A single buffer can be present in multiple
1251 * CPU's LRUs at the same time.
1253 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1254 * sb_find_get_block().
1256 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1257 * a local interrupt disable for that.
1260 #define BH_LRU_SIZE 8
1263 struct buffer_head *bhs[BH_LRU_SIZE];
1266 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1269 #define bh_lru_lock() local_irq_disable()
1270 #define bh_lru_unlock() local_irq_enable()
1272 #define bh_lru_lock() preempt_disable()
1273 #define bh_lru_unlock() preempt_enable()
1276 static inline void check_irqs_on(void)
1278 #ifdef irqs_disabled
1279 BUG_ON(irqs_disabled());
1284 * The LRU management algorithm is dopey-but-simple. Sorry.
1286 static void bh_lru_install(struct buffer_head *bh)
1288 struct buffer_head *evictee = NULL;
1293 lru = &__get_cpu_var(bh_lrus);
1294 if (lru->bhs[0] != bh) {
1295 struct buffer_head *bhs[BH_LRU_SIZE];
1301 for (in = 0; in < BH_LRU_SIZE; in++) {
1302 struct buffer_head *bh2 = lru->bhs[in];
1307 if (out >= BH_LRU_SIZE) {
1308 BUG_ON(evictee != NULL);
1315 while (out < BH_LRU_SIZE)
1317 memcpy(lru->bhs, bhs, sizeof(bhs));
1326 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1328 static struct buffer_head *
1329 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1331 struct buffer_head *ret = NULL;
1337 lru = &__get_cpu_var(bh_lrus);
1338 for (i = 0; i < BH_LRU_SIZE; i++) {
1339 struct buffer_head *bh = lru->bhs[i];
1341 if (bh && bh->b_bdev == bdev &&
1342 bh->b_blocknr == block && bh->b_size == size) {
1345 lru->bhs[i] = lru->bhs[i - 1];
1360 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1361 * it in the LRU and mark it as accessed. If it is not present then return
1364 struct buffer_head *
1365 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1367 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1370 bh = __find_get_block_slow(bdev, block);
1378 EXPORT_SYMBOL(__find_get_block);
1381 * __getblk will locate (and, if necessary, create) the buffer_head
1382 * which corresponds to the passed block_device, block and size. The
1383 * returned buffer has its reference count incremented.
1385 * __getblk() cannot fail - it just keeps trying. If you pass it an
1386 * illegal block number, __getblk() will happily return a buffer_head
1387 * which represents the non-existent block. Very weird.
1389 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1390 * attempt is failing. FIXME, perhaps?
1392 struct buffer_head *
1393 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1395 struct buffer_head *bh = __find_get_block(bdev, block, size);
1399 bh = __getblk_slow(bdev, block, size);
1402 EXPORT_SYMBOL(__getblk);
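/*
 * Illustrative sketch (hypothetical helper): when a block is about to be
 * completely overwritten its old contents are not needed, so __getblk() is
 * enough - no read from disk is issued.
 */
static void example_overwrite_block(struct block_device *bdev, sector_t block,
				    unsigned size, const void *data)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memcpy(bh->b_data, data, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}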
1405 * Do async read-ahead on a buffer..
1407 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1409 struct buffer_head *bh = __getblk(bdev, block, size);
1411 ll_rw_block(READA, 1, &bh);
1415 EXPORT_SYMBOL(__breadahead);
1418 * __bread() - reads a specified block and returns the bh
1419 * @bdev: the block_device to read from
1420 * @block: number of block
1421 * @size: size (in bytes) to read
1423 * Reads a specified block, and returns buffer head that contains it.
1424 * It returns NULL if the block was unreadable.
1426 struct buffer_head *
1427 __bread(struct block_device *bdev, sector_t block, unsigned size)
1429 struct buffer_head *bh = __getblk(bdev, block, size);
1431 if (likely(bh) && !buffer_uptodate(bh))
1432 bh = __bread_slow(bh);
1435 EXPORT_SYMBOL(__bread);
1438 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1439 * This doesn't race because it runs in each cpu either in irq
1440 * or with preempt disabled.
1442 static void invalidate_bh_lru(void *arg)
1444 struct bh_lru *b = &get_cpu_var(bh_lrus);
1447 for (i = 0; i < BH_LRU_SIZE; i++) {
1451 put_cpu_var(bh_lrus);
1454 void invalidate_bh_lrus(void)
1456 on_each_cpu(invalidate_bh_lru, NULL, 1);
1458 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1460 void set_bh_page(struct buffer_head *bh,
1461 struct page *page, unsigned long offset)
1464 BUG_ON(offset >= PAGE_SIZE);
1465 if (PageHighMem(page))
1467 * This catches illegal uses and preserves the offset:
1469 bh->b_data = (char *)(0 + offset);
1471 bh->b_data = page_address(page) + offset;
1473 EXPORT_SYMBOL(set_bh_page);
1476 * Called when truncating a buffer on a page completely.
1478 static void discard_buffer(struct buffer_head * bh)
1481 clear_buffer_dirty(bh);
1483 clear_buffer_mapped(bh);
1484 clear_buffer_req(bh);
1485 clear_buffer_new(bh);
1486 clear_buffer_delay(bh);
1487 clear_buffer_unwritten(bh);
1492 * block_invalidatepage - invalidate part or all of a buffer-backed page
1494 * @page: the page which is affected
1495 * @offset: the index of the truncation point
1497 * block_invalidatepage() is called when all or part of the page has become
1498 * invalidated by a truncate operation.
1500 * block_invalidatepage() does not have to release all buffers, but it must
1501 * ensure that no dirty buffer is left outside @offset and that no I/O
1502 * is underway against any of the blocks which are outside the truncation
1503 * point. Because the caller is about to free (and possibly reuse) those
1506 void block_invalidatepage(struct page *page, unsigned long offset)
1508 struct buffer_head *head, *bh, *next;
1509 unsigned int curr_off = 0;
1511 BUG_ON(!PageLocked(page));
1512 if (!page_has_buffers(page))
1515 head = page_buffers(page);
1518 unsigned int next_off = curr_off + bh->b_size;
1519 next = bh->b_this_page;
1522 * is this block fully invalidated?
1524 if (offset <= curr_off)
1526 curr_off = next_off;
1528 } while (bh != head);
1531 * We release buffers only if the entire page is being invalidated.
1532 * The get_block cached value has been unconditionally invalidated,
1533 * so real IO is not possible anymore.
1536 try_to_release_page(page, 0);
1540 EXPORT_SYMBOL(block_invalidatepage);
1543 * We attach and possibly dirty the buffers atomically wrt
1544 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1545 * is already excluded via the page lock.
1547 void create_empty_buffers(struct page *page,
1548 unsigned long blocksize, unsigned long b_state)
1550 struct buffer_head *bh, *head, *tail;
1552 head = alloc_page_buffers(page, blocksize, 1);
1555 bh->b_state |= b_state;
1557 bh = bh->b_this_page;
1559 tail->b_this_page = head;
1561 spin_lock(&page->mapping->private_lock);
1562 if (PageUptodate(page) || PageDirty(page)) {
1565 if (PageDirty(page))
1566 set_buffer_dirty(bh);
1567 if (PageUptodate(page))
1568 set_buffer_uptodate(bh);
1569 bh = bh->b_this_page;
1570 } while (bh != head);
1572 attach_page_buffers(page, head);
1573 spin_unlock(&page->mapping->private_lock);
1575 EXPORT_SYMBOL(create_empty_buffers);
1578 * We are taking a block for data and we don't want any output from any
1579 * buffer-cache aliases starting from the return of this function
1580 * until the moment when something explicitly marks the buffer
1581 * dirty (hopefully that will not happen until we free that block ;-)
1582 * We don't even need to mark it not-uptodate - nobody can expect
1583 * anything from a newly allocated buffer anyway. We used to use
1584 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1585 * don't want to mark the alias unmapped, for example - it would confuse
1586 * anyone who might pick it with bread() afterwards...
1588 * Also.. Note that bforget() doesn't lock the buffer. So there can
1589 * be writeout I/O going on against recently-freed buffers. We don't
1590 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1591 * only if we really need to. That happens here.
1593 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1595 struct buffer_head *old_bh;
1599 old_bh = __find_get_block_slow(bdev, block);
1601 clear_buffer_dirty(old_bh);
1602 wait_on_buffer(old_bh);
1603 clear_buffer_req(old_bh);
1607 EXPORT_SYMBOL(unmap_underlying_metadata);
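/*
 * Illustrative sketch (hypothetical fragment): the typical sequence around a
 * freshly allocated block - map it, mark it new, and kill any stale alias of
 * that block lingering in the blockdev's pagecache before new data hits disk.
 */
static void example_map_freshly_allocated(struct buffer_head *bh_result,
					  struct super_block *sb,
					  sector_t phys_block)
{
	map_bh(bh_result, sb, phys_block);
	set_buffer_new(bh_result);
	unmap_underlying_metadata(bh_result->b_bdev, bh_result->b_blocknr);
}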
1610 * NOTE! All mapped/uptodate combinations are valid:
1612 * Mapped Uptodate Meaning
1614 * No No "unknown" - must do get_block()
1615 * No Yes "hole" - zero-filled
1616 * Yes No "allocated" - allocated on disk, not read in
1617 * Yes Yes "valid" - allocated and up-to-date in memory.
1619 * "Dirty" is valid only with the last case (mapped+uptodate).
1623 * While block_write_full_page is writing back the dirty buffers under
1624 * the page lock, whoever dirtied the buffers may decide to clean them
1625 * again at any time. We handle that by only looking at the buffer
1626 * state inside lock_buffer().
1628 * If block_write_full_page() is called for regular writeback
1629 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1630 * locked buffer. This only can happen if someone has written the buffer
1631 * directly, with submit_bh(). At the address_space level PageWriteback
1632 * prevents this contention from occurring.
1634 * If block_write_full_page() is called with wbc->sync_mode ==
1635 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1636 * causes the writes to be flagged as synchronous writes, but the
1637 * block device queue will NOT be unplugged, since usually many pages
1638 * will be pushed out before the higher-level caller actually
1639 * waits for the writes to be completed. The various wait functions,
1640 * such as wait_on_writeback_range() will ultimately call sync_page()
1641 * which will ultimately call blk_run_backing_dev(), which will end up
1642 * unplugging the device queue.
1644 static int __block_write_full_page(struct inode *inode, struct page *page,
1645 get_block_t *get_block, struct writeback_control *wbc,
1646 bh_end_io_t *handler)
1650 sector_t last_block;
1651 struct buffer_head *bh, *head;
1652 const unsigned blocksize = 1 << inode->i_blkbits;
1653 int nr_underway = 0;
1654 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1655 WRITE_SYNC_PLUG : WRITE);
1657 BUG_ON(!PageLocked(page));
1659 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1661 if (!page_has_buffers(page)) {
1662 create_empty_buffers(page, blocksize,
1663 (1 << BH_Dirty)|(1 << BH_Uptodate));
1667 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1668 * here, and the (potentially unmapped) buffers may become dirty at
1669 * any time. If a buffer becomes dirty here after we've inspected it
1670 * then we just miss that fact, and the page stays dirty.
1672 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1673 * handle that here by just cleaning them.
1676 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1677 head = page_buffers(page);
1681 * Get all the dirty buffers mapped to disk addresses and
1682 * handle any aliases from the underlying blockdev's mapping.
1685 if (block > last_block) {
1687 * mapped buffers outside i_size will occur, because
1688 * this page can be outside i_size when there is a
1689 * truncate in progress.
1692 * The buffer was zeroed by block_write_full_page()
1694 clear_buffer_dirty(bh);
1695 set_buffer_uptodate(bh);
1696 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1698 WARN_ON(bh->b_size != blocksize);
1699 err = get_block(inode, block, bh, 1);
1702 clear_buffer_delay(bh);
1703 if (buffer_new(bh)) {
1704 /* blockdev mappings never come here */
1705 clear_buffer_new(bh);
1706 unmap_underlying_metadata(bh->b_bdev,
1710 bh = bh->b_this_page;
1712 } while (bh != head);
1715 if (!buffer_mapped(bh))
1718 * If it's a fully non-blocking write attempt and we cannot
1719 * lock the buffer then redirty the page. Note that this can
1720 * potentially cause a busy-wait loop from writeback threads
1721 * and kswapd activity, but those code paths have their own
1722 * higher-level throttling.
1724 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1726 } else if (!trylock_buffer(bh)) {
1727 redirty_page_for_writepage(wbc, page);
1730 if (test_clear_buffer_dirty(bh)) {
1731 mark_buffer_async_write_endio(bh, handler);
1735 } while ((bh = bh->b_this_page) != head);
1738 * The page and its buffers are protected by PageWriteback(), so we can
1739 * drop the bh refcounts early.
1741 BUG_ON(PageWriteback(page));
1742 set_page_writeback(page);
1745 struct buffer_head *next = bh->b_this_page;
1746 if (buffer_async_write(bh)) {
1747 submit_bh(write_op, bh);
1751 } while (bh != head);
1756 if (nr_underway == 0) {
1758 * The page was marked dirty, but the buffers were
1759 * clean. Someone wrote them back by hand with
1760 * ll_rw_block/submit_bh. A rare case.
1762 end_page_writeback(page);
1765 * The page and buffer_heads can be released at any time from
1773 * ENOSPC, or some other error. We may already have added some
1774 * blocks to the file, so we need to write these out to avoid
1775 * exposing stale data.
1776 * The page is currently locked and not marked for writeback
1779 /* Recovery: lock and submit the mapped buffers */
1781 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1782 !buffer_delay(bh)) {
1784 mark_buffer_async_write_endio(bh, handler);
1787 * The buffer may have been set dirty during
1788 * attachment to a dirty page.
1790 clear_buffer_dirty(bh);
1792 } while ((bh = bh->b_this_page) != head);
1794 BUG_ON(PageWriteback(page));
1795 mapping_set_error(page->mapping, err);
1796 set_page_writeback(page);
1798 struct buffer_head *next = bh->b_this_page;
1799 if (buffer_async_write(bh)) {
1800 clear_buffer_dirty(bh);
1801 submit_bh(write_op, bh);
1805 } while (bh != head);
1811 * If a page has any new buffers, zero them out here, and mark them uptodate
1812 * and dirty so they'll be written out (in order to prevent uninitialised
1813 * block data from leaking). And clear the new bit.
1815 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1817 unsigned int block_start, block_end;
1818 struct buffer_head *head, *bh;
1820 BUG_ON(!PageLocked(page));
1821 if (!page_has_buffers(page))
1824 bh = head = page_buffers(page);
1827 block_end = block_start + bh->b_size;
1829 if (buffer_new(bh)) {
1830 if (block_end > from && block_start < to) {
1831 if (!PageUptodate(page)) {
1832 unsigned start, size;
1834 start = max(from, block_start);
1835 size = min(to, block_end) - start;
1837 zero_user(page, start, size);
1838 set_buffer_uptodate(bh);
1841 clear_buffer_new(bh);
1842 mark_buffer_dirty(bh);
1846 block_start = block_end;
1847 bh = bh->b_this_page;
1848 } while (bh != head);
1850 EXPORT_SYMBOL(page_zero_new_buffers);
1852 static int __block_prepare_write(struct inode *inode, struct page *page,
1853 unsigned from, unsigned to, get_block_t *get_block)
1855 unsigned block_start, block_end;
1858 unsigned blocksize, bbits;
1859 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1861 BUG_ON(!PageLocked(page));
1862 BUG_ON(from > PAGE_CACHE_SIZE);
1863 BUG_ON(to > PAGE_CACHE_SIZE);
1866 blocksize = 1 << inode->i_blkbits;
1867 if (!page_has_buffers(page))
1868 create_empty_buffers(page, blocksize, 0);
1869 head = page_buffers(page);
1871 bbits = inode->i_blkbits;
1872 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1874 for(bh = head, block_start = 0; bh != head || !block_start;
1875 block++, block_start=block_end, bh = bh->b_this_page) {
1876 block_end = block_start + blocksize;
1877 if (block_end <= from || block_start >= to) {
1878 if (PageUptodate(page)) {
1879 if (!buffer_uptodate(bh))
1880 set_buffer_uptodate(bh);
1885 clear_buffer_new(bh);
1886 if (!buffer_mapped(bh)) {
1887 WARN_ON(bh->b_size != blocksize);
1888 err = get_block(inode, block, bh, 1);
1891 if (buffer_new(bh)) {
1892 unmap_underlying_metadata(bh->b_bdev,
1894 if (PageUptodate(page)) {
1895 clear_buffer_new(bh);
1896 set_buffer_uptodate(bh);
1897 mark_buffer_dirty(bh);
1900 if (block_end > to || block_start < from)
1901 zero_user_segments(page,
1907 if (PageUptodate(page)) {
1908 if (!buffer_uptodate(bh))
1909 set_buffer_uptodate(bh);
1912 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1913 !buffer_unwritten(bh) &&
1914 (block_start < from || block_end > to)) {
1915 ll_rw_block(READ, 1, &bh);
1920 * If we issued read requests - let them complete.
1922 while(wait_bh > wait) {
1925 ret = wait_on_buffer_async(*--wait_bh, current->io_wait);
1927 WARN(1, "%s: ret\n", __func__);
1930 if (!buffer_uptodate(*wait_bh))
1934 page_zero_new_buffers(page, from, to);
1938 static int __block_commit_write(struct inode *inode, struct page *page,
1939 unsigned from, unsigned to)
1941 unsigned block_start, block_end;
1944 struct buffer_head *bh, *head;
1946 blocksize = 1 << inode->i_blkbits;
1948 for(bh = head = page_buffers(page), block_start = 0;
1949 bh != head || !block_start;
1950 block_start=block_end, bh = bh->b_this_page) {
1951 block_end = block_start + blocksize;
1952 if (block_end <= from || block_start >= to) {
1953 if (!buffer_uptodate(bh))
1956 set_buffer_uptodate(bh);
1957 mark_buffer_dirty(bh);
1959 clear_buffer_new(bh);
1963 * If this is a partial write which happened to make all buffers
1964 * uptodate then we can optimize away a bogus readpage() for
1965 * the next read(). Here we 'discover' whether the page went
1966 * uptodate as a result of this (potentially partial) write.
1969 SetPageUptodate(page);
1974 * block_write_begin takes care of the basic task of block allocation and
1975 * bringing partial write blocks uptodate first.
1977 * If *pagep is not NULL, then block_write_begin uses the locked page
1978 * at *pagep rather than allocating its own. In this case, the page will
1979 * not be unlocked or deallocated on failure.
1981 int block_write_begin(struct file *file, struct address_space *mapping,
1982 loff_t pos, unsigned len, unsigned flags,
1983 struct page **pagep, void **fsdata,
1984 get_block_t *get_block)
1986 struct inode *inode = mapping->host;
1990 unsigned start, end;
1993 index = pos >> PAGE_CACHE_SHIFT;
1994 start = pos & (PAGE_CACHE_SIZE - 1);
2000 page = grab_cache_page_write_begin(mapping, index, flags);
2007 BUG_ON(!PageLocked(page));
2009 status = __block_prepare_write(inode, page, start, end, get_block);
2010 if (unlikely(status)) {
2011 ClearPageUptodate(page);
2015 page_cache_release(page);
2019 * prepare_write() may have instantiated a few blocks
2020 * outside i_size. Trim these off again. Don't need
2021 * i_size_read because we hold i_mutex.
2023 if (pos + len > inode->i_size)
2024 vmtruncate(inode, inode->i_size);
2031 EXPORT_SYMBOL(block_write_begin);
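/*
 * Illustrative sketch of a ->write_begin built on block_write_begin().
 * example_get_block() is hypothetical: a real filesystem would look up its
 * metadata here and, when create != 0, allocate a block; this stub just
 * pretends the file is mapped 1:1 onto the device.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, example_get_block);
}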
2033 int block_write_end(struct file *file, struct address_space *mapping,
2034 loff_t pos, unsigned len, unsigned copied,
2035 struct page *page, void *fsdata)
2037 struct inode *inode = mapping->host;
2040 start = pos & (PAGE_CACHE_SIZE - 1);
2042 if (unlikely(copied < len)) {
2044 * The buffers that were written will now be uptodate, so we
2045 * don't have to worry about a readpage reading them and
2046 * overwriting a partial write. However if we have encountered
2047 * a short write and only partially written into a buffer, it
2048 * will not be marked uptodate, so a readpage might come in and
2049 * destroy our partial write.
2051 * Do the simplest thing, and just treat any short write to a
2052 * non uptodate page as a zero-length write, and force the
2053 * caller to redo the whole thing.
2055 if (!PageUptodate(page))
2058 page_zero_new_buffers(page, start+copied, start+len);
2060 flush_dcache_page(page);
2062 /* This could be a short (even 0-length) commit */
2063 __block_commit_write(inode, page, start, start+copied);
2067 EXPORT_SYMBOL(block_write_end);
2069 int generic_write_end(struct file *file, struct address_space *mapping,
2070 loff_t pos, unsigned len, unsigned copied,
2071 struct page *page, void *fsdata)
2073 struct inode *inode = mapping->host;
2074 int i_size_changed = 0;
2076 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2079 * No need to use i_size_read() here, the i_size
2080 * cannot change under us because we hold i_mutex.
2082 * But it's important to update i_size while still holding page lock:
2083 * page writeout could otherwise come in and zero beyond i_size.
2085 if (pos+copied > inode->i_size) {
2086 i_size_write(inode, pos+copied);
2091 page_cache_release(page);
2094 * Don't mark the inode dirty under page lock. First, it unnecessarily
2095 * makes the holding time of page lock longer. Second, it forces lock
2096 * ordering of page lock and transaction start for journaling
2100 mark_inode_dirty(inode);
2104 EXPORT_SYMBOL(generic_write_end);
2107 * block_is_partially_uptodate checks whether buffers within a page are
2110 * Returns true if all buffers which correspond to a file portion
2111 * we want to read are uptodate.
2113 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2116 struct inode *inode = page->mapping->host;
2117 unsigned block_start, block_end, blocksize;
2119 struct buffer_head *bh, *head;
2122 if (!page_has_buffers(page))
2125 blocksize = 1 << inode->i_blkbits;
2126 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2128 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2131 head = page_buffers(page);
2135 block_end = block_start + blocksize;
2136 if (block_end > from && block_start < to) {
2137 if (!buffer_uptodate(bh)) {
2141 if (block_end >= to)
2144 block_start = block_end;
2145 bh = bh->b_this_page;
2146 } while (bh != head);
2150 EXPORT_SYMBOL(block_is_partially_uptodate);
2153 * Generic "read page" function for block devices that have the normal
2154 * get_block functionality. This is most of the block device filesystems.
2155 * Reads the page asynchronously --- the unlock_buffer() and
2156 * set/clear_buffer_uptodate() functions propagate buffer state into the
2157 * page struct once IO has completed.
2159 int block_read_full_page(struct page *page, get_block_t *get_block)
2161 struct inode *inode = page->mapping->host;
2162 sector_t iblock, lblock;
2163 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2164 unsigned int blocksize;
2166 int fully_mapped = 1;
2168 BUG_ON(!PageLocked(page));
2169 blocksize = 1 << inode->i_blkbits;
2170 if (!page_has_buffers(page))
2171 create_empty_buffers(page, blocksize, 0);
2172 head = page_buffers(page);
2174 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2175 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2181 if (buffer_uptodate(bh))
2184 if (!buffer_mapped(bh)) {
2188 if (iblock < lblock) {
2189 WARN_ON(bh->b_size != blocksize);
2190 err = get_block(inode, iblock, bh, 0);
2194 if (!buffer_mapped(bh)) {
2195 zero_user(page, i * blocksize, blocksize);
2197 set_buffer_uptodate(bh);
2201 * get_block() might have updated the buffer
2204 if (buffer_uptodate(bh))
2208 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2211 SetPageMappedToDisk(page);
2215 * All buffers are uptodate - we can set the page uptodate
2216 * as well. But not if get_block() returned an error.
2218 if (!PageError(page))
2219 SetPageUptodate(page);
2224 /* Stage two: lock the buffers */
2225 for (i = 0; i < nr; i++) {
2228 mark_buffer_async_read(bh);
2232 * Stage 3: start the IO. Check for uptodateness
2233 * inside the buffer lock in case another process reading
2234 * the underlying blockdev brought it uptodate (the sct fix).
2236 for (i = 0; i < nr; i++) {
2238 if (buffer_uptodate(bh))
2239 end_buffer_async_read(bh, 1);
2241 submit_bh(READ, bh);
2245 EXPORT_SYMBOL(block_read_full_page);
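/*
 * Illustrative sketch: a ->readpage built on block_read_full_page(), using
 * the hypothetical example_get_block() above (create == 0 on this path, so
 * nothing is allocated), plus the minimal buffer-backed aops it would sit in.
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.write_begin	= example_write_begin,
	.write_end	= generic_write_end,
	.invalidatepage	= block_invalidatepage,
};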
2247 /* utility function for filesystems that need to do work on expanding
2248 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2249 * deal with the hole.
2251 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2253 struct address_space *mapping = inode->i_mapping;
2258 err = inode_newsize_ok(inode, size);
2262 err = pagecache_write_begin(NULL, mapping, size, 0,
2263 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2268 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2274 EXPORT_SYMBOL(generic_cont_expand_simple);
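/*
 * Illustrative sketch, not part of the original file: a simplified setattr
 * path that calls generic_cont_expand_simple() to zero-fill and allocate up
 * to the new EOF before committing an enlarging size change.  myfs_setattr()
 * is a hypothetical placeholder and skips the usual permission checks.
 */
#if 0	/* example only */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		int err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}
#endif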
2276 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2277 loff_t pos, loff_t *bytes)
2279 struct inode *inode = mapping->host;
2280 unsigned blocksize = 1 << inode->i_blkbits;
2283 pgoff_t index, curidx;
2285 unsigned zerofrom, offset, len;
2288 index = pos >> PAGE_CACHE_SHIFT;
2289 offset = pos & ~PAGE_CACHE_MASK;
2291 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2292 zerofrom = curpos & ~PAGE_CACHE_MASK;
2293 if (zerofrom & (blocksize-1)) {
2294 *bytes |= (blocksize-1);
2297 len = PAGE_CACHE_SIZE - zerofrom;
2299 err = pagecache_write_begin(file, mapping, curpos, len,
2300 AOP_FLAG_UNINTERRUPTIBLE,
2304 zero_user(page, zerofrom, len);
2305 err = pagecache_write_end(file, mapping, curpos, len, len,
2312 balance_dirty_pages_ratelimited(mapping);
2315 /* page covers the boundary, find the boundary offset */
2316 if (index == curidx) {
2317 zerofrom = curpos & ~PAGE_CACHE_MASK;
2318 /* if we will expand the thing last block will be filled */
2319 if (offset <= zerofrom) {
2322 if (zerofrom & (blocksize-1)) {
2323 *bytes |= (blocksize-1);
2326 len = offset - zerofrom;
2328 err = pagecache_write_begin(file, mapping, curpos, len,
2329 AOP_FLAG_UNINTERRUPTIBLE,
2333 zero_user(page, zerofrom, len);
2334 err = pagecache_write_end(file, mapping, curpos, len, len,
2346 * For moronic filesystems that do not allow holes in files.
2347 * We may have to extend the file.
2349 int cont_write_begin(struct file *file, struct address_space *mapping,
2350 loff_t pos, unsigned len, unsigned flags,
2351 struct page **pagep, void **fsdata,
2352 get_block_t *get_block, loff_t *bytes)
2354 struct inode *inode = mapping->host;
2355 unsigned blocksize = 1 << inode->i_blkbits;
2359 err = cont_expand_zero(file, mapping, pos, bytes);
2363 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2364 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2365 *bytes |= (blocksize-1);
2370 err = block_write_begin(file, mapping, pos, len,
2371 flags, pagep, fsdata, get_block);
2375 EXPORT_SYMBOL(cont_write_begin);
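/*
 * Illustrative sketch, not part of the original file: a write_begin wrapper
 * for a filesystem without holes, built on cont_write_begin() above.  The
 * myfs_* names are hypothetical, and MYFS_I()->i_zeroed_to stands in for
 * wherever such a filesystem tracks how far it has zero-filled on disk.
 */
#if 0	/* example only */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->i_zeroed_to);
}
#endif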
2377 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2378 get_block_t *get_block)
2380 struct inode *inode = page->mapping->host;
2381 int err = __block_prepare_write(inode, page, from, to, get_block);
2383 ClearPageUptodate(page);
2386 EXPORT_SYMBOL(block_prepare_write);
2388 int block_commit_write(struct page *page, unsigned from, unsigned to)
2390 struct inode *inode = page->mapping->host;
2391 __block_commit_write(inode,page,from,to);
2394 EXPORT_SYMBOL(block_commit_write);
2397 * block_page_mkwrite() is not allowed to change the file size as it gets
2398 * called from a page fault handler when a page is first dirtied. Hence we must
2399 * be careful to check for EOF conditions here. We set the page up correctly
2400 * for a written page which means we get ENOSPC checking when writing into
2401 * holes and correct delalloc and unwritten extent mapping on filesystems that
2402 * support these features.
2404 * We are not allowed to take the i_mutex here so we have to play games to
2405 * protect against truncate races as the page could now be beyond EOF. Because
2406 * vmtruncate() writes the inode size before removing pages, once we have the
2407 * page lock we can determine safely if the page is beyond EOF. If it is not
2408 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2412 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2413 get_block_t get_block)
2415 struct page *page = vmf->page;
2416 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2419 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2422 size = i_size_read(inode);
2423 if ((page->mapping != inode->i_mapping) ||
2424 (page_offset(page) > size)) {
2425 /* page got truncated out from underneath us */
2430 /* page is wholly or partially inside EOF */
2431 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2432 end = size & ~PAGE_CACHE_MASK;
2434 end = PAGE_CACHE_SIZE;
2436 ret = block_prepare_write(page, 0, end, get_block);
2438 ret = block_commit_write(page, 0, end);
2440 if (unlikely(ret)) {
2444 else /* -ENOSPC, -EIO, etc */
2445 ret = VM_FAULT_SIGBUS;
2447 ret = VM_FAULT_LOCKED;
2452 EXPORT_SYMBOL(block_page_mkwrite);
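/*
 * Illustrative sketch, not part of the original file: exposing
 * block_page_mkwrite() through a vm_operations_struct so that writes through
 * mmap() go via the filesystem's block allocator.  The myfs_* names and
 * myfs_get_block() are hypothetical.
 */
#if 0	/* example only */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif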
2455 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2456 * immediately, while under the page lock. So it needs a special end_io
2457 * handler which does not touch the bh after unlocking it.
2459 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2461 __end_buffer_read_notouch(bh, uptodate);
2465 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2466 * the page (converting it to a circular linked list and taking care of page dirty races).
2469 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2471 struct buffer_head *bh;
2473 BUG_ON(!PageLocked(page));
2475 spin_lock(&page->mapping->private_lock);
2478 if (PageDirty(page))
2479 set_buffer_dirty(bh);
2480 if (!bh->b_this_page)
2481 bh->b_this_page = head;
2482 bh = bh->b_this_page;
2483 } while (bh != head);
2484 attach_page_buffers(page, head);
2485 spin_unlock(&page->mapping->private_lock);
2489 * On entry, the page is fully not uptodate.
2490 * On exit the page is fully uptodate in the areas outside (from,to)
2492 int nobh_write_begin(struct file *file, struct address_space *mapping,
2493 loff_t pos, unsigned len, unsigned flags,
2494 struct page **pagep, void **fsdata,
2495 get_block_t *get_block)
2497 struct inode *inode = mapping->host;
2498 const unsigned blkbits = inode->i_blkbits;
2499 const unsigned blocksize = 1 << blkbits;
2500 struct buffer_head *head, *bh;
2504 unsigned block_in_page;
2505 unsigned block_start, block_end;
2506 sector_t block_in_file;
2509 int is_mapped_to_disk = 1;
2511 index = pos >> PAGE_CACHE_SHIFT;
2512 from = pos & (PAGE_CACHE_SIZE - 1);
2515 page = grab_cache_page_write_begin(mapping, index, flags);
2521 if (page_has_buffers(page)) {
2523 page_cache_release(page);
2525 return block_write_begin(file, mapping, pos, len, flags, pagep,
2529 if (PageMappedToDisk(page))
2533 * Allocate buffers so that we can keep track of state, and potentially
2534 * attach them to the page if an error occurs. In the common case of
2535 * no error, they will just be freed again without ever being attached
2536 * to the page (which is all OK, because we're under the page lock).
2538 * Be careful: the buffer linked list is a NULL terminated one, rather
2539 * than the circular one we're used to.
2541 head = alloc_page_buffers(page, blocksize, 0);
2547 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2550 * We loop across all blocks in the page, whether or not they are
2551 * part of the affected region. This is so we can discover if the
2552 * page is fully mapped-to-disk.
2554 for (block_start = 0, block_in_page = 0, bh = head;
2555 block_start < PAGE_CACHE_SIZE;
2556 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2559 block_end = block_start + blocksize;
2562 if (block_start >= to)
2564 ret = get_block(inode, block_in_file + block_in_page,
2568 if (!buffer_mapped(bh))
2569 is_mapped_to_disk = 0;
2571 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2572 if (PageUptodate(page)) {
2573 set_buffer_uptodate(bh);
2576 if (buffer_new(bh) || !buffer_mapped(bh)) {
2577 zero_user_segments(page, block_start, from,
2581 if (buffer_uptodate(bh))
2582 continue; /* reiserfs does this */
2583 if (block_start < from || block_end > to) {
2585 bh->b_end_io = end_buffer_read_nobh;
2586 submit_bh(READ, bh);
2593 * The page is locked, so these buffers are protected from
2594 * any VM or truncate activity. Hence we don't need to care
2595 * for the buffer_head refcounts.
2597 for (bh = head; bh; bh = bh->b_this_page) {
2600 err = wait_on_buffer_async(bh, current->io_wait);
2602 WARN(1, "%s: wait_on_buffer_async() returned %d\n", __func__, err);
2605 if (!buffer_uptodate(bh))
2612 if (is_mapped_to_disk)
2613 SetPageMappedToDisk(page);
2615 *fsdata = head; /* to be released by nobh_write_end */
2622 * Error recovery is a bit difficult. We need to zero out blocks that
2623 * were newly allocated, and dirty them to ensure they get written out.
2624 * Buffers need to be attached to the page at this point, otherwise
2625 * the handling of potential IO errors during writeout would be hard
2626 * (could try doing synchronous writeout, but what if that fails too?)
2628 attach_nobh_buffers(page, head);
2629 page_zero_new_buffers(page, from, to);
2633 page_cache_release(page);
2636 if (pos + len > inode->i_size)
2637 vmtruncate(inode, inode->i_size);
2641 EXPORT_SYMBOL(nobh_write_begin);
2643 int nobh_write_end(struct file *file, struct address_space *mapping,
2644 loff_t pos, unsigned len, unsigned copied,
2645 struct page *page, void *fsdata)
2647 struct inode *inode = page->mapping->host;
2648 struct buffer_head *head = fsdata;
2649 struct buffer_head *bh;
2650 BUG_ON(fsdata != NULL && page_has_buffers(page));
2652 if (unlikely(copied < len) && head)
2653 attach_nobh_buffers(page, head);
2654 if (page_has_buffers(page))
2655 return generic_write_end(file, mapping, pos, len,
2656 copied, page, fsdata);
2658 SetPageUptodate(page);
2659 set_page_dirty(page);
2660 if (pos+copied > inode->i_size) {
2661 i_size_write(inode, pos+copied);
2662 mark_inode_dirty(inode);
2666 page_cache_release(page);
2670 head = head->b_this_page;
2671 free_buffer_head(bh);
2676 EXPORT_SYMBOL(nobh_write_end);
2679 * nobh_writepage() - based on block_write_full_page() except
2680 * that it tries to operate without attaching bufferheads to
2683 int nobh_writepage(struct page *page, get_block_t *get_block,
2684 struct writeback_control *wbc)
2686 struct inode * const inode = page->mapping->host;
2687 loff_t i_size = i_size_read(inode);
2688 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2692 /* Is the page fully inside i_size? */
2693 if (page->index < end_index)
2696 /* Is the page fully outside i_size? (truncate in progress) */
2697 offset = i_size & (PAGE_CACHE_SIZE-1);
2698 if (page->index >= end_index+1 || !offset) {
2700 * The page may have dirty, unmapped buffers. For example,
2701 * they may have been added in ext3_writepage(). Make them
2702 * freeable here, so the page does not leak.
2705 /* Not really sure about this - do we need this ? */
2706 if (page->mapping->a_ops->invalidatepage)
2707 page->mapping->a_ops->invalidatepage(page, offset);
2710 return 0; /* don't care */
2714 * The page straddles i_size. It must be zeroed out on each and every
2715 * writepage invocation because it may be mmapped. "A file is mapped
2716 * in multiples of the page size. For a file that is not a multiple of
2717 * the page size, the remaining memory is zeroed when mapped, and
2718 * writes to that region are not written out to the file."
2720 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2722 ret = mpage_writepage(page, get_block, wbc);
2724 ret = __block_write_full_page(inode, page, get_block, wbc,
2725 end_buffer_async_write);
2728 EXPORT_SYMBOL(nobh_writepage);
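/*
 * Illustrative sketch, not part of the original file: how an address_space
 * could use the nobh_* helpers above for its write path so that pages stay
 * free of attached buffer_heads in the common case.  The myfs_* names and
 * myfs_get_block() are hypothetical.
 */
#if 0	/* example only */
static int myfs_nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page, struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	/* .readpage etc. as usual */
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};
#endif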
2730 int nobh_truncate_page(struct address_space *mapping,
2731 loff_t from, get_block_t *get_block)
2733 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2734 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2737 unsigned length, pos;
2738 struct inode *inode = mapping->host;
2740 struct buffer_head map_bh;
2743 blocksize = 1 << inode->i_blkbits;
2744 length = offset & (blocksize - 1);
2746 /* Block boundary? Nothing to do */
2750 length = blocksize - length;
2751 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2753 page = grab_cache_page(mapping, index);
2758 if (page_has_buffers(page)) {
2761 page_cache_release(page);
2762 return block_truncate_page(mapping, from, get_block);
2765 /* Find the buffer that contains "offset" */
2767 while (offset >= pos) {
2772 map_bh.b_size = blocksize;
2774 err = get_block(inode, iblock, &map_bh, 0);
2777 /* unmapped? It's a hole - nothing to do */
2778 if (!buffer_mapped(&map_bh))
2781 /* Ok, it's mapped. Make sure it's up-to-date */
2782 if (!PageUptodate(page)) {
2783 err = mapping->a_ops->readpage(NULL, page);
2785 page_cache_release(page);
2789 if (!PageUptodate(page)) {
2793 if (page_has_buffers(page))
2796 zero_user(page, offset, length);
2797 set_page_dirty(page);
2802 page_cache_release(page);
2806 EXPORT_SYMBOL(nobh_truncate_page);
2808 int block_truncate_page(struct address_space *mapping,
2809 loff_t from, get_block_t *get_block)
2811 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2812 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2815 unsigned length, pos;
2816 struct inode *inode = mapping->host;
2818 struct buffer_head *bh;
2821 blocksize = 1 << inode->i_blkbits;
2822 length = offset & (blocksize - 1);
2824 /* Block boundary? Nothing to do */
2828 length = blocksize - length;
2829 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2831 page = grab_cache_page(mapping, index);
2836 if (!page_has_buffers(page))
2837 create_empty_buffers(page, blocksize, 0);
2839 /* Find the buffer that contains "offset" */
2840 bh = page_buffers(page);
2842 while (offset >= pos) {
2843 bh = bh->b_this_page;
2849 if (!buffer_mapped(bh)) {
2850 WARN_ON(bh->b_size != blocksize);
2851 err = get_block(inode, iblock, bh, 0);
2854 /* unmapped? It's a hole - nothing to do */
2855 if (!buffer_mapped(bh))
2859 /* Ok, it's mapped. Make sure it's up-to-date */
2860 if (PageUptodate(page))
2861 set_buffer_uptodate(bh);
2863 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2864 ll_rw_block(READ, 1, &bh);
2865 err = wait_on_buffer_async(bh, current->io_wait);
2867 WARN(1, "%s: wait_on_buffer_async() returned %d\n", __func__, err);
2870 /* Uhhuh. Read error. Complain and punt. */
2872 if (!buffer_uptodate(bh))
2876 zero_user(page, offset, length);
2877 mark_buffer_dirty(bh);
2882 page_cache_release(page);
2886 EXPORT_SYMBOL(block_truncate_page);
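/*
 * Illustrative sketch, not part of the original file: a hypothetical truncate
 * path that uses block_truncate_page() above (or nobh_truncate_page() when
 * running without attached buffer_heads) to zero the tail of the last block
 * before releasing the blocks themselves.  myfs_get_block() and
 * myfs_free_blocks_beyond() are placeholders.
 */
#if 0	/* example only */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks_beyond(inode, inode->i_size);
}
#endif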
2889 * The generic ->writepage function for buffer-backed address_spaces
2890 * this form passes in the end_io handler used to finish the IO.
2892 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2893 struct writeback_control *wbc, bh_end_io_t *handler)
2895 struct inode * const inode = page->mapping->host;
2896 loff_t i_size = i_size_read(inode);
2897 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2900 /* Is the page fully inside i_size? */
2901 if (page->index < end_index)
2902 return __block_write_full_page(inode, page, get_block, wbc,
2905 /* Is the page fully outside i_size? (truncate in progress) */
2906 offset = i_size & (PAGE_CACHE_SIZE-1);
2907 if (page->index >= end_index+1 || !offset) {
2909 * The page may have dirty, unmapped buffers. For example,
2910 * they may have been added in ext3_writepage(). Make them
2911 * freeable here, so the page does not leak.
2913 do_invalidatepage(page, 0);
2915 return 0; /* don't care */
2919 * The page straddles i_size. It must be zeroed out on each and every
2920 * writepage invocation because it may be mmapped. "A file is mapped
2921 * in multiples of the page size. For a file that is not a multiple of
2922 * the page size, the remaining memory is zeroed when mapped, and
2923 * writes to that region are not written out to the file."
2925 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2926 return __block_write_full_page(inode, page, get_block, wbc, handler);
2928 EXPORT_SYMBOL(block_write_full_page_endio);
2931 * The generic ->writepage function for buffer-backed address_spaces
2933 int block_write_full_page(struct page *page, get_block_t *get_block,
2934 struct writeback_control *wbc)
2936 return block_write_full_page_endio(page, get_block, wbc,
2937 end_buffer_async_write);
2939 EXPORT_SYMBOL(block_write_full_page);
2941 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2942 get_block_t *get_block)
2944 struct buffer_head tmp;
2945 struct inode *inode = mapping->host;
2948 tmp.b_size = 1 << inode->i_blkbits;
2949 get_block(inode, block, &tmp, 0);
2950 return tmp.b_blocknr;
2952 EXPORT_SYMBOL(generic_block_bmap);
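/*
 * Illustrative sketch, not part of the original file: the canonical one-line
 * wrappers most buffer-backed filesystems use for ->writepage and ->bmap on
 * top of the two helpers above.  myfs_get_block() is hypothetical.
 */
#if 0	/* example only */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif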
2954 static void end_bio_bh_io_sync(struct bio *bio, int err)
2956 struct buffer_head *bh = bio->bi_private;
2958 if (err == -EOPNOTSUPP) {
2959 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2960 set_bit(BH_Eopnotsupp, &bh->b_state);
2963 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2964 set_bit(BH_Quiet, &bh->b_state);
2966 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2970 int submit_bh(int rw, struct buffer_head * bh)
2975 BUG_ON(!buffer_locked(bh));
2976 BUG_ON(!buffer_mapped(bh));
2977 BUG_ON(!bh->b_end_io);
2978 BUG_ON(buffer_delay(bh));
2979 BUG_ON(buffer_unwritten(bh));
2982 * Mask in barrier bit for a write (could be either a WRITE or a WRITE_SYNC).
2985 if (buffer_ordered(bh) && (rw & WRITE))
2986 rw |= WRITE_BARRIER;
2989 * Only clear out a write error when rewriting
2991 if (test_set_buffer_req(bh) && (rw & WRITE))
2992 clear_buffer_write_io_error(bh);
2995 * from here on down, it's all bio -- do the initial mapping,
2996 * submit_bio -> generic_make_request may further map this bio around
2998 bio = bio_alloc(GFP_NOIO, 1);
3000 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3001 bio->bi_bdev = bh->b_bdev;
3002 bio->bi_io_vec[0].bv_page = bh->b_page;
3003 bio->bi_io_vec[0].bv_len = bh->b_size;
3004 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
3008 bio->bi_size = bh->b_size;
3010 bio->bi_end_io = end_bio_bh_io_sync;
3011 bio->bi_private = bh;
3014 submit_bio(rw, bio);
3016 if (bio_flagged(bio, BIO_EOPNOTSUPP))
3022 EXPORT_SYMBOL(submit_bh);
3025 * ll_rw_block: low-level access to block devices (DEPRECATED)
3026 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
3027 * @nr: number of &struct buffer_heads in the array
3028 * @bhs: array of pointers to &struct buffer_head
3030 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3031 * requests an I/O operation on them, either a %READ or a %WRITE. The third kind,
3032 * %SWRITE, is like %WRITE except that we make sure the *current* data in the buffers
3033 * is sent to disk. The fourth %READA option is described in the documentation
3034 * for generic_make_request(), which ll_rw_block() calls.
3036 * This function drops any buffer that it cannot get a lock on (with the
3037 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3038 * clean when doing a write request, and any buffer that appears to be
3039 * up-to-date when doing a read request. Further, it marks as clean the buffers that
3040 * are processed for writing (the buffer cache won't assume that they are
3041 * actually clean until the buffer gets unlocked).
3043 * ll_rw_block sets b_end_io to a simple completion handler that marks
3044 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3047 * All of the buffers must be for the same device, and must also be a
3048 * multiple of the current approved size for the device.
3050 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3054 for (i = 0; i < nr; i++) {
3055 struct buffer_head *bh = bhs[i];
3057 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3059 else if (!trylock_buffer(bh))
3062 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3063 rw == SWRITE_SYNC_PLUG) {
3064 if (test_clear_buffer_dirty(bh)) {
3065 bh->b_end_io = end_buffer_write_sync;
3067 if (rw == SWRITE_SYNC)
3068 submit_bh(WRITE_SYNC, bh);
3070 submit_bh(WRITE, bh);
3074 if (!buffer_uptodate(bh)) {
3075 bh->b_end_io = end_buffer_read_sync;
3084 EXPORT_SYMBOL(ll_rw_block);
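/*
 * Illustrative sketch, not part of the original file: the traditional
 * read-and-wait pattern built on ll_rw_block(), for a metadata block that
 * was looked up with sb_getblk() and may not be uptodate yet.
 * myfs_read_block() is a hypothetical helper name.
 */
#if 0	/* example only */
static struct buffer_head *myfs_read_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);		/* I/O error */
			bh = NULL;
		}
	}
	return bh;
}
#endif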
3087 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3088 * and then start new I/O and then wait upon it. The caller must have a ref on
3091 int sync_dirty_buffer(struct buffer_head *bh)
3095 WARN_ON(atomic_read(&bh->b_count) < 1);
3097 if (test_clear_buffer_dirty(bh)) {
3099 bh->b_end_io = end_buffer_write_sync;
3100 ret = submit_bh(WRITE_SYNC, bh);
3102 if (buffer_eopnotsupp(bh)) {
3103 clear_buffer_eopnotsupp(bh);
3106 if (!ret && !buffer_uptodate(bh))
3113 EXPORT_SYMBOL(sync_dirty_buffer);
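/*
 * Illustrative sketch, not part of the original file: forcing a critical
 * metadata buffer (say, an in-core superblock copy) to stable storage with
 * sync_dirty_buffer() when the caller asked for a synchronous update.
 * myfs_commit_super() is a hypothetical helper name.
 */
#if 0	/* example only */
static int myfs_commit_super(struct buffer_head *sbh, int wait)
{
	mark_buffer_dirty(sbh);
	if (!wait)
		return 0;
	/* waits for in-flight I/O, rewrites the block, then waits again */
	return sync_dirty_buffer(sbh);
}
#endif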
3116 * try_to_free_buffers() checks if all the buffers on this particular page
3117 * are unused, and releases them if so.
3119 * Exclusion against try_to_free_buffers may be obtained by either
3120 * locking the page or by holding its mapping's private_lock.
3122 * If the page is dirty but all the buffers are clean then we need to
3123 * be sure to mark the page clean as well. This is because the page
3124 * may be against a block device, and a later reattachment of buffers
3125 * to a dirty page will set *all* buffers dirty. Which would corrupt
3126 * filesystem data on the same device.
3128 * The same applies to regular filesystem pages: if all the buffers are
3129 * clean then we set the page clean and proceed. To do that, we require
3130 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3133 * try_to_free_buffers() is non-blocking.
3135 static inline int buffer_busy(struct buffer_head *bh)
3137 return atomic_read(&bh->b_count) |
3138 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3142 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3144 struct buffer_head *head = page_buffers(page);
3145 struct buffer_head *bh;
3149 if (buffer_write_io_error(bh) && page->mapping)
3150 set_bit(AS_EIO, &page->mapping->flags);
3151 if (buffer_busy(bh))
3153 bh = bh->b_this_page;
3154 } while (bh != head);
3157 struct buffer_head *next = bh->b_this_page;
3159 if (bh->b_assoc_map)
3160 __remove_assoc_queue(bh);
3162 } while (bh != head);
3163 *buffers_to_free = head;
3164 __clear_page_buffers(page);
3170 int try_to_free_buffers(struct page *page)
3172 struct address_space * const mapping = page->mapping;
3173 struct buffer_head *buffers_to_free = NULL;
3176 BUG_ON(!PageLocked(page));
3177 if (PageWriteback(page))
3180 if (mapping == NULL) { /* can this still happen? */
3181 ret = drop_buffers(page, &buffers_to_free);
3185 spin_lock(&mapping->private_lock);
3186 ret = drop_buffers(page, &buffers_to_free);
3189 * If the filesystem writes its buffers by hand (eg ext3)
3190 * then we can have clean buffers against a dirty page. We
3191 * clean the page here; otherwise the VM will never notice
3192 * that the filesystem did any IO at all.
3194 * Also, during truncate, discard_buffer will have marked all
3195 * the page's buffers clean. We discover that here and clean
3198 * private_lock must be held over this entire operation in order
3199 * to synchronise against __set_page_dirty_buffers and prevent the
3200 * dirty bit from being lost.
3203 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3204 spin_unlock(&mapping->private_lock);
3206 if (buffers_to_free) {
3207 struct buffer_head *bh = buffers_to_free;
3210 struct buffer_head *next = bh->b_this_page;
3211 free_buffer_head(bh);
3213 } while (bh != buffers_to_free);
3217 EXPORT_SYMBOL(try_to_free_buffers);
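/*
 * Illustrative sketch, not part of the original file: a minimal ->releasepage
 * built on try_to_free_buffers().  The VM calls it with the page locked,
 * which satisfies the exclusion rules described above; a real filesystem
 * would first drop any private pins it holds on the buffers.
 */
#if 0	/* example only */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif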
3219 void block_sync_page(struct page *page)
3221 struct address_space *mapping;
3224 mapping = page_mapping(page);
3226 blk_run_backing_dev(mapping->backing_dev_info, page);
3228 EXPORT_SYMBOL(block_sync_page);
3231 * There are no bdflush tunables left. But distributions are
3232 * still running obsolete flush daemons, so we terminate them here.
3234 * Use of bdflush() is deprecated and will be removed in a future kernel.
3235 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3237 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3239 static int msg_count;
3241 if (!capable(CAP_SYS_ADMIN))
3244 if (msg_count < 5) {
3247 "warning: process `%s' used the obsolete bdflush"
3248 " system call\n", current->comm);
3249 printk(KERN_INFO "Fix your initscripts?\n");
3258 * Buffer-head allocation
3260 static struct kmem_cache *bh_cachep;
3263 * Once the number of bh's in the machine exceeds this level, we start
3264 * stripping them in writeback.
3266 static int max_buffer_heads;
3268 int buffer_heads_over_limit;
3270 struct bh_accounting {
3271 int nr; /* Number of live bh's */
3272 int ratelimit; /* Limit cacheline bouncing */
3275 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3277 static void recalc_bh_state(void)
3282 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3284 __get_cpu_var(bh_accounting).ratelimit = 0;
3285 for_each_online_cpu(i)
3286 tot += per_cpu(bh_accounting, i).nr;
3287 buffer_heads_over_limit = (tot > max_buffer_heads);
3290 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3292 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3294 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3295 get_cpu_var(bh_accounting).nr++;
3297 put_cpu_var(bh_accounting);
3301 EXPORT_SYMBOL(alloc_buffer_head);
3303 void free_buffer_head(struct buffer_head *bh)
3305 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3306 kmem_cache_free(bh_cachep, bh);
3307 get_cpu_var(bh_accounting).nr--;
3309 put_cpu_var(bh_accounting);
3311 EXPORT_SYMBOL(free_buffer_head);
3313 static void buffer_exit_cpu(int cpu)
3316 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3318 for (i = 0; i < BH_LRU_SIZE; i++) {
3322 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3323 per_cpu(bh_accounting, cpu).nr = 0;
3324 put_cpu_var(bh_accounting);
3327 static int buffer_cpu_notify(struct notifier_block *self,
3328 unsigned long action, void *hcpu)
3330 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3331 buffer_exit_cpu((unsigned long)hcpu);
3336 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3337 * @bh: struct buffer_head
3339 * Return true if the buffer is up-to-date and false,
3340 * with the buffer locked, if not.
3342 int bh_uptodate_or_lock(struct buffer_head *bh)
3344 if (!buffer_uptodate(bh)) {
3346 if (!buffer_uptodate(bh))
3352 EXPORT_SYMBOL(bh_uptodate_or_lock);
3355 * bh_submit_read - Submit a locked buffer for reading
3356 * @bh: struct buffer_head
3358 * Returns zero on success and -EIO on error.
3360 int bh_submit_read(struct buffer_head *bh)
3362 BUG_ON(!buffer_locked(bh));
3364 if (buffer_uptodate(bh)) {
3370 bh->b_end_io = end_buffer_read_sync;
3371 submit_bh(READ, bh);
3373 if (buffer_uptodate(bh))
3377 EXPORT_SYMBOL(bh_submit_read);
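/*
 * Illustrative sketch, not part of the original file: the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read(), which skips the I/O entirely
 * when the buffer is already uptodate and otherwise submits the read while
 * holding the lock just taken.  myfs_read_bh() is a hypothetical helper name.
 */
#if 0	/* example only */
static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;
	return bh_submit_read(bh);	/* submits READ and waits for completion */
}
#endif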
3380 init_buffer_head(void *data)
3382 struct buffer_head *bh = data;
3384 memset(bh, 0, sizeof(*bh));
3385 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3388 void __init buffer_init(void)
3392 bh_cachep = kmem_cache_create("buffer_head",
3393 sizeof(struct buffer_head), 0,
3394 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3399 * Limit the bh occupancy to 10% of ZONE_NORMAL
3401 nrpages = (nr_free_buffer_pages() * 10) / 100;
3402 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3403 hotcpu_notifier(buffer_cpu_notify, 0);