ext2: async get_block and support code
diff --git a/fs/buffer.c b/fs/buffer.c
index 209f7f15f5f801b4023f6e121643b3744ce1b11e..2616498fb23aa431fbd90f4e09d830e3163ec295 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -64,7 +64,8 @@ static int sync_buffer(void *word)
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
-       io_schedule();
+       if (!in_aio(current))
+               io_schedule();
        return 0;
 }
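sync_buffer() is the action callback the wait_on_bit*() machinery invokes
before each sleep: it kicks the device queue so the pending I/O makes
progress. With this change an async waiter only kicks the queue and returns
instead of scheduling away. The in_aio() test is not defined in this hunk;
presumably it checks whether the task carries the async wait-bit queue used
later in this series, roughly (a sketch, assuming task_struct grows the
io_wait pointer that current->io_wait below refers to):

static inline int in_aio(struct task_struct *tsk)
{
	/* assumed helper: a task retrying an aio op has a queue installed */
	return tsk->io_wait != NULL;
}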
 
@@ -75,6 +76,13 @@ void __lock_buffer(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(__lock_buffer);
 
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+       return wait_on_bit_lock_async(&bh->b_state, BH_Lock, sync_buffer,
+                                               TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__lock_buffer_async);
+
 void unlock_buffer(struct buffer_head *bh)
 {
        clear_bit_unlock(BH_Lock, &bh->b_state);
@@ -94,6 +102,13 @@ void __wait_on_buffer(struct buffer_head * bh)
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
+int __wait_on_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+       return wait_on_bit_async(&bh->b_state, BH_Lock, sync_buffer,
+                                               TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__wait_on_buffer_async);
+
 static void
 __clear_page_buffers(struct page *page)
 {
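Both new primitives return 0 once the bit is acquired (or cleared), and
-EIOCBRETRY when the supplied wait_bit_queue was queued instead of sleeping;
the caller must then unwind and let the aio core re-drive the operation. The
lock_buffer_async()/wait_on_buffer_async() wrappers used further down are not
shown in this file; presumably they sit in include/linux/buffer_head.h along
these lines (a sketch, assuming trylock_buffer() covers the fast path and the
*_async variants degrade to plain blocking waits for a synchronous queue):

static inline int lock_buffer_async(struct buffer_head *bh,
				    struct wait_bit_queue *wait)
{
	if (!trylock_buffer(bh))
		return __lock_buffer_async(bh, wait);
	return 0;
}

static inline int wait_on_buffer_async(struct buffer_head *bh,
				       struct wait_bit_queue *wait)
{
	if (buffer_locked(bh))
		return __wait_on_buffer_async(bh, wait);
	return 0;
}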
@@ -280,7 +295,7 @@ void invalidate_bdev(struct block_device *bdev)
 EXPORT_SYMBOL(invalidate_bdev);
 
 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
@@ -1217,9 +1232,11 @@ void __bforget(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(__bforget);
 
-static struct buffer_head *__bread_slow(struct buffer_head *bh)
+static struct buffer_head *__bread_slow(struct buffer_head *bh,
+                                       struct wait_bit_queue *wait)
 {
-       lock_buffer(bh);
+       if (lock_buffer_async(bh, wait))
+               return ERR_PTR(-EIOCBRETRY);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
@@ -1227,7 +1244,8 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
-               wait_on_buffer(bh);
+               if (wait_on_buffer_async(bh, wait))
+                       return ERR_PTR(-EIOCBRETRY);
                if (buffer_uptodate(bh))
                        return bh;
        }
@@ -1416,15 +1434,17 @@ EXPORT_SYMBOL(__breadahead);
- *  It returns NULL if the block was unreadable.
+ *  It returns NULL if the block was unreadable, or ERR_PTR(-EIOCBRETRY)
+ *  if the read must be retried from the async context.
  */
 struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+__bread_async(struct block_device *bdev, sector_t block, unsigned size,
+             struct wait_bit_queue *wait)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
 
        if (likely(bh) && !buffer_uptodate(bh))
-               bh = __bread_slow(bh);
+               bh = __bread_slow(bh, wait);
        return bh;
 }
-EXPORT_SYMBOL(__bread);
+EXPORT_SYMBOL(__bread_async);
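A caller now has three outcomes to handle: a valid buffer, NULL (unreadable
block), or ERR_PTR(-EIOCBRETRY). A hypothetical filesystem helper
(illustration only, not part of this patch):

static struct buffer_head *meta_bread(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;

	bh = __bread_async(sb->s_bdev, block, sb->s_blocksize,
			   current->io_wait);
	if (IS_ERR(bh))
		return bh;		/* -EIOCBRETRY: wait queued, retry later */
	if (!bh)
		return ERR_PTR(-EIO);	/* read failed */
	return bh;
}

Since EXPORT_SYMBOL(__bread) goes away, existing callers presumably keep
working through a compat wrapper, e.g. a static inline __bread() that passes
NULL for the wait queue (an assumption; the wrapper is not in this hunk).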
 
 /*
  * invalidate_bh_lrus() is called rarely - but not only at unmount.
@@ -1709,9 +1728,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
-                * potentially cause a busy-wait loop from pdflush and kswapd
-                * activity, but those code paths have their own higher-level
-                * throttling.
+                * potentially cause a busy-wait loop from writeback threads
+                * and kswapd activity, but those code paths have their own
+                * higher-level throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
                        lock_buffer(bh);
@@ -1912,7 +1931,13 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
         * If we issued read requests - let them complete.
         */
        while(wait_bh > wait) {
-               wait_on_buffer(*--wait_bh);
+               int ret;
+
+               ret = wait_on_buffer_async(*--wait_bh, current->io_wait);
+               if (ret && !err) {
+                       WARN(1, "%s: ret=%d\n", __func__, ret);
+                       err = ret;
+               }
                if (!buffer_uptodate(*wait_bh))
                        err = -EIO;
        }
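wait_on_buffer_async() is handed current->io_wait here: presumably the
per-task wait_bit_queue that the aio core installs while re-driving an iocb,
and NULL for ordinary synchronous callers, so this path still blocks exactly
as before outside aio. Roughly the assumed retry model (illustration only;
ki_wait, io_wait and retry_op() are assumptions, not part of this patch):

static void aio_retry_iocb(struct kiocb *iocb)
{
	ssize_t ret;

	current->io_wait = &iocb->ki_wait;	/* arm async waits */
	ret = retry_op(iocb);			/* re-drive the operation */
	current->io_wait = NULL;		/* back to sync behaviour */

	if (ret != -EIOCBRETRY)
		aio_complete(iocb, ret, 0);
	/* else: wait entry queued, we run again when it fires */
}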
@@ -2239,16 +2264,10 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        void *fsdata;
-       unsigned long limit;
        int err;
 
-       err = -EFBIG;
-        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-       if (limit != RLIM_INFINITY && size > (loff_t)limit) {
-               send_sig(SIGXFSZ, current, 0);
-               goto out;
-       }
-       if (size > inode->i_sb->s_maxbytes)
+       err = inode_newsize_ok(inode, size);
+       if (err)
                goto out;
 
        err = pagecache_write_begin(NULL, mapping, size, 0,
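inode_newsize_ok() subsumes the open-coded checks removed above: it returns
-EFBIG when the new size exceeds RLIMIT_FSIZE (also raising SIGXFSZ) or the
superblock's s_maxbytes. Simplified sketch of the checks it performs (see
fs/attr.c for the real thing):

int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
	if (inode->i_size < offset) {
		unsigned long limit;

		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
		if (limit != RLIM_INFINITY && offset > limit)
			goto out_sig;
		if (offset > inode->i_sb->s_maxbytes)
			goto out_big;
	}
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
}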
@@ -2587,7 +2606,13 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
                 * for the buffer_head refcounts.
                 */
                for (bh = head; bh; bh = bh->b_this_page) {
-                       wait_on_buffer(bh);
+                       int err;
+
+                       err = wait_on_buffer_async(bh, current->io_wait);
+                       if (err && !ret) {
+                               WARN(1, "%s: err=%d\n", __func__, err);
+                               ret = err;
+                       }
                        if (!buffer_uptodate(bh))
                                ret = -EIO;
                }
@@ -2847,10 +2872,14 @@ int block_truncate_page(struct address_space *mapping,
                set_buffer_uptodate(bh);
 
        if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
-               err = -EIO;
                ll_rw_block(READ, 1, &bh);
-               wait_on_buffer(bh);
+               err = wait_on_buffer_async(bh, current->io_wait);
+               if (err) {
+                       WARN(1, "%s: err=%d\n", __func__, err);
+                       goto out;
+               }
                /* Uhhuh. Read error. Complain and punt. */
+               err = -EIO;
                if (!buffer_uptodate(bh))
                        goto unlock;
        }
@@ -3214,7 +3243,7 @@ EXPORT_SYMBOL(block_sync_page);
  * still running obsolete flush daemons, so we terminate them here.
  *
  * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
  */
 SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {