From 2557c31c11e392d9e359a6b5beab1511e4b9c5b5 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sat, 26 Sep 2009 17:39:07 +0200
Subject: [PATCH] ext2: async get_block and support code

Signed-off-by: Jens Axboe
---
 fs/buffer.c                 | 23 +++++++++++-----
 fs/ext2/inode.c             | 53 ++++++++++++++++++++++++-------------
 include/linux/buffer_head.h | 36 +++++++++++++++++++++++--
 3 files changed, 85 insertions(+), 27 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 25b307128b2e..2616498fb23a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -76,6 +76,13 @@ void __lock_buffer(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(__lock_buffer);
 
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+	return wait_on_bit_lock_async(&bh->b_state, BH_Lock, sync_buffer,
+				      TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__lock_buffer_async);
+
 void unlock_buffer(struct buffer_head *bh)
 {
 	clear_bit_unlock(BH_Lock, &bh->b_state);
@@ -1225,9 +1232,11 @@ void __bforget(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(__bforget);
 
-static struct buffer_head *__bread_slow(struct buffer_head *bh)
+static struct buffer_head *__bread_slow(struct buffer_head *bh,
+					struct wait_bit_queue *wait)
 {
-	lock_buffer(bh);
+	if (lock_buffer_async(bh, wait))
+		return ERR_PTR(-EIOCBRETRY);
 	if (buffer_uptodate(bh)) {
 		unlock_buffer(bh);
 		return bh;
@@ -1235,7 +1244,8 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh);
-		wait_on_buffer(bh);
+		if (wait_on_buffer_async(bh, wait))
+			return ERR_PTR(-EIOCBRETRY);
 		if (buffer_uptodate(bh))
 			return bh;
 	}
@@ -1424,15 +1434,16 @@ EXPORT_SYMBOL(__breadahead);
  *  It returns NULL if the block was unreadable.
  */
 struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+__bread_async(struct block_device *bdev, sector_t block, unsigned size,
+	      struct wait_bit_queue *wait)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 
 	if (likely(bh) && !buffer_uptodate(bh))
-		bh = __bread_slow(bh);
+		bh = __bread_slow(bh, wait);
 	return bh;
 }
-EXPORT_SYMBOL(__bread);
+EXPORT_SYMBOL(__bread_async);
 
 /*
  * invalidate_bh_lrus() is called rarely - but not only at unmount.
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index ade634076d0a..426d2f0d87b3 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -194,11 +194,9 @@ static int ext2_block_to_path(struct inode *inode,
  *	or when it reads all @depth-1 indirect blocks successfully and finds
  *	the whole chain, all way to the data (returns %NULL, *err == 0).
  */
-static Indirect *ext2_get_branch(struct inode *inode,
-				 int depth,
-				 int *offsets,
-				 Indirect chain[4],
-				 int *err)
+static Indirect *ext2_get_branch(struct inode *inode, int depth,
+				 int *offsets, Indirect chain[4],
+				 int *err, struct wait_bit_queue *wait)
 {
 	struct super_block *sb = inode->i_sb;
 	Indirect *p = chain;
@@ -210,8 +208,8 @@ static Indirect *ext2_get_branch(struct inode *inode,
 	if (!p->key)
 		goto no_block;
 	while (--depth) {
-		bh = sb_bread(sb, le32_to_cpu(p->key));
-		if (!bh)
+		bh = sb_bread_async(sb, le32_to_cpu(p->key), wait);
+		if (!bh || IS_ERR(bh))
 			goto failure;
 		read_lock(&EXT2_I(inode)->i_meta_lock);
 		if (!verify_chain(chain, p))
@@ -229,7 +227,10 @@ changed:
 	*err = -EAGAIN;
 	goto no_block;
 failure:
-	*err = -EIO;
+	if (IS_ERR(bh))
+		*err = PTR_ERR(bh);
+	else
+		*err = -EIO;
 no_block:
 	return p;
 }
@@ -567,10 +568,10 @@ static void ext2_splice_branch(struct inode *inode,
  * return = 0, if plain lookup failed.
  * return < 0, error case.
  */
-static int ext2_get_blocks(struct inode *inode,
-			   sector_t iblock, unsigned long maxblocks,
-			   struct buffer_head *bh_result,
-			   int create)
+static int ext2_get_blocks(struct inode *inode, sector_t iblock,
+			   unsigned long maxblocks,
+			   struct buffer_head *bh_result, int create,
+			   struct wait_bit_queue *wait)
 {
 	int err = -EIO;
 	int offsets[4];
@@ -589,7 +590,7 @@ static int ext2_get_blocks(struct inode *inode,
 	if (depth == 0)
 		return (err);
 
-	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+	partial = ext2_get_branch(inode, depth, offsets, chain, &err, wait);
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
 		first_block = le32_to_cpu(chain[depth - 1].key);
@@ -621,7 +622,7 @@ static int ext2_get_blocks(struct inode *inode,
 	}
 
 	/* Next simple case - plain lookup or failed read of indirect block */
-	if (!create || err == -EIO)
+	if (!create || err == -EIO || err == -EIOCBRETRY)
 		goto cleanup;
 
 	mutex_lock(&ei->truncate_mutex);
@@ -642,7 +643,8 @@ static int ext2_get_blocks(struct inode *inode,
 			brelse(partial->bh);
 			partial--;
 		}
-		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+		partial = ext2_get_branch(inode, depth, offsets, chain, &err,
+					  wait);
 		if (!partial) {
 			count++;
 			mutex_unlock(&ei->truncate_mutex);
@@ -715,7 +717,7 @@ int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_
 {
 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
 	int ret = ext2_get_blocks(inode, iblock, max_blocks,
-			      bh_result, create);
+			      bh_result, create, NULL);
 	if (ret > 0) {
 		bh_result->b_size = (ret << inode->i_blkbits);
 		ret = 0;
@@ -724,6 +726,19 @@ int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_
 
 }
 
+int ext2_get_block_async(struct inode *inode, sector_t iblock,
+			 struct buffer_head *bh_result, int create)
+{
+	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+	int ret = ext2_get_blocks(inode, iblock, max_blocks,
+				  bh_result, create, current->io_wait);
+	if (ret > 0) {
+		bh_result->b_size = (ret << inode->i_blkbits);
+		ret = 0;
+	}
+	return ret;
+
+}
 int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		u64 start, u64 len)
 {
@@ -753,7 +768,7 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping,
 		struct page **pagep, void **fsdata)
 {
 	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
-							ext2_get_block);
+							ext2_get_block_async);
 }
 
 static int
@@ -776,7 +791,7 @@ ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
 	 * pages in order to make this work easily.
 	 */
 	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
-							ext2_get_block);
+							ext2_get_block_async);
 }
 
 static int ext2_nobh_writepage(struct page *page,
@@ -900,7 +915,7 @@ static Indirect *ext2_find_shared(struct inode *inode,
 	*top = 0;
 	for (k = depth; k > 1 && !offsets[k-1]; k--)
 		;
-	partial = ext2_get_branch(inode, k, offsets, chain, &err);
+	partial = ext2_get_branch(inode, k, offsets, chain, &err, NULL);
 	if (!partial)
 		partial = chain + k-1;
 	/*
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 70e3304ed7c8..fe62037d34f0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -176,12 +176,13 @@ struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
 void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, unsigned int size);
-struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+struct buffer_head *__bread_async(struct block_device *, sector_t block, unsigned size, struct wait_bit_queue *);
 void invalidate_bh_lrus(void);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int submit_bh(int, struct buffer_head *);
@@ -273,10 +274,22 @@ static inline void bforget(struct buffer_head *bh)
 		__bforget(bh);
 }
 
+static inline struct buffer_head *__bread(struct block_device *bdev,
+					  sector_t block, unsigned size)
+{
+	return __bread_async(bdev, block, size, NULL);
+}
+
 static inline struct buffer_head *
 sb_bread(struct super_block *sb, sector_t block)
 {
-	return __bread(sb->s_bdev, block, sb->s_blocksize);
+	return __bread_async(sb->s_bdev, block, sb->s_blocksize, NULL);
+}
+
+static inline struct buffer_head *
+sb_bread_async(struct super_block *sb, sector_t block, struct wait_bit_queue *w)
+{
+	return __bread_async(sb->s_bdev, block, sb->s_blocksize, w);
 }
 
 static inline void
@@ -339,6 +352,25 @@ static inline void lock_buffer(struct buffer_head *bh)
 		__lock_buffer(bh);
 }
 
+static inline int lock_buffer_async(struct buffer_head *bh,
+				    struct wait_bit_queue *wait)
+{
+	if (!trylock_buffer(bh)) {
+		DEFINE_WAIT_BIT(wq_stack, &bh->b_state, BH_Lock);
+
+		if (!wait)
+			wait = &wq_stack;
+		else {
+			wait->key.flags = &bh->b_state;
+			wait->key.bit_nr = BH_Lock;
+		}
+
+		return __lock_buffer_async(bh, wait);
+	}
+
+	return 0;
+}
+
 extern int __set_page_dirty_buffers(struct page *page);
 
 #else /* CONFIG_BLOCK */
-- 
2.25.1
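
A minimal usage sketch, not part of the patch above. It assumes the rest of
this series is applied (wait_on_bit_lock_async(), wait_on_buffer_async(),
current->io_wait and the -EIOCBRETRY convention). The idea behind the new
*_async helpers is that a caller which passes a wait_bit_queue gets
-EIOCBRETRY back (as an ERR_PTR on the bread paths) instead of sleeping on
the buffer, and is expected to resubmit the operation once the wait queue is
woken. The example_read_block() helper below is hypothetical and only
illustrates that calling convention:

/*
 * Illustrative sketch only, not part of the patch: read one metadata block
 * without blocking, using the per-task wait queue that this series adds.
 */
static int example_read_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;

	bh = sb_bread_async(sb, blocknr, current->io_wait);
	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* -EIOCBRETRY: retry when woken */
	if (!bh)
		return -EIO;		/* buffer could not be read */

	/* ... use bh->b_data ... */

	brelse(bh);
	return 0;
}

Passing a NULL wait queue (as the synchronous __bread()/sb_bread() wrappers
do) falls back to the old blocking behaviour, so existing callers are
unaffected.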