}
EXPORT_SYMBOL(__lock_buffer);
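+
+/*
+ * Async version of __lock_buffer(): instead of sleeping on a contended
+ * lock, @wait is queued on the BH_Lock bit waitqueue and -EIOCBRETRY is
+ * returned so the caller can retry once the buffer is unlocked.
+ */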
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+ return wait_on_bit_lock_async(&bh->b_state, BH_Lock, sync_buffer,
+ TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__lock_buffer_async);
+
void unlock_buffer(struct buffer_head *bh)
{
clear_bit_unlock(BH_Lock, &bh->b_state);
}
EXPORT_SYMBOL(__bforget);
-static struct buffer_head *__bread_slow(struct buffer_head *bh)
+static struct buffer_head *__bread_slow(struct buffer_head *bh,
+ struct wait_bit_queue *wait)
{
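+	/*
+	 * With a usable @wait, neither a contended buffer lock nor an
+	 * in-flight read puts us to sleep: we return -EIOCBRETRY and the
+	 * caller retries once the wait queue is woken.
+	 */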
- lock_buffer(bh);
+ if (lock_buffer_async(bh, wait))
+ return ERR_PTR(-EIOCBRETRY);
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
return bh;
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ, bh);
- wait_on_buffer(bh);
+ if (wait_on_buffer_async(bh, wait))
+ return ERR_PTR(-EIOCBRETRY);
if (buffer_uptodate(bh))
return bh;
}
- * It returns NULL if the block was unreadable.
+ * It returns NULL if the block was unreadable, or an ERR_PTR() on
+ * failure (-EIOCBRETRY when the read must be retried asynchronously).
*/
struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+__bread_async(struct block_device *bdev, sector_t block, unsigned size,
+ struct wait_bit_queue *wait)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh) && !buffer_uptodate(bh))
- bh = __bread_slow(bh);
+ bh = __bread_slow(bh, wait);
return bh;
}
-EXPORT_SYMBOL(__bread);
+EXPORT_SYMBOL(__bread_async);
/*
* invalidate_bh_lrus() is called rarely - but not only at unmount.
* or when it reads all @depth-1 indirect blocks successfully and finds
* the whole chain, all way to the data (returns %NULL, *err == 0).
*/
-static Indirect *ext2_get_branch(struct inode *inode,
- int depth,
- int *offsets,
- Indirect chain[4],
- int *err)
+static Indirect *ext2_get_branch(struct inode *inode, int depth,
+ int *offsets, Indirect chain[4],
+ int *err, struct wait_bit_queue *wait)
{
struct super_block *sb = inode->i_sb;
Indirect *p = chain;
if (!p->key)
goto no_block;
while (--depth) {
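+		/*
+		 * sb_bread_async() hands back ERR_PTR(-EIOCBRETRY) when the
+		 * read could not be waited on synchronously; the failure
+		 * path below propagates that error to the caller.
+		 */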
- bh = sb_bread(sb, le32_to_cpu(p->key));
- if (!bh)
+ bh = sb_bread_async(sb, le32_to_cpu(p->key), wait);
+ if (!bh || IS_ERR(bh))
goto failure;
read_lock(&EXT2_I(inode)->i_meta_lock);
if (!verify_chain(chain, p))
*err = -EAGAIN;
goto no_block;
failure:
- *err = -EIO;
+ if (IS_ERR(bh))
+ *err = PTR_ERR(bh);
+ else
+ *err = -EIO;
no_block:
return p;
}
* return = 0, if plain lookup failed.
- * return < 0, error case.
+ * return < 0, error case (including -EIOCBRETRY when an async buffer
+ *             read must be retried).
*/
-static int ext2_get_blocks(struct inode *inode,
- sector_t iblock, unsigned long maxblocks,
- struct buffer_head *bh_result,
- int create)
+static int ext2_get_blocks(struct inode *inode, sector_t iblock,
+ unsigned long maxblocks,
+ struct buffer_head *bh_result, int create,
+ struct wait_bit_queue *wait)
{
int err = -EIO;
int offsets[4];
if (depth == 0)
return (err);
- partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+ partial = ext2_get_branch(inode, depth, offsets, chain, &err, wait);
/* Simplest case - block found, no allocation needed */
if (!partial) {
first_block = le32_to_cpu(chain[depth - 1].key);
}
/* Next simple case - plain lookup or failed read of indirect block */
- if (!create || err == -EIO)
+ if (!create || err == -EIO || err == -EIOCBRETRY)
goto cleanup;
mutex_lock(&ei->truncate_mutex);
brelse(partial->bh);
partial--;
}
- partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+ partial = ext2_get_branch(inode, depth, offsets, chain, &err,
+ wait);
if (!partial) {
count++;
mutex_unlock(&ei->truncate_mutex);
{
unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
int ret = ext2_get_blocks(inode, iblock, max_blocks,
- bh_result, create);
+ bh_result, create, NULL);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
}
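+
+/*
+ * Async variant of ext2_get_block(): threads current->io_wait through
+ * the block lookup, so buffer reads may return -EIOCBRETRY instead of
+ * sleeping.
+ */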
+int ext2_get_block_async(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+ int ret = ext2_get_blocks(inode, iblock, max_blocks,
+ bh_result, create, current->io_wait);
+ if (ret > 0) {
+ bh_result->b_size = (ret << inode->i_blkbits);
+ ret = 0;
+ }
+	return ret;
+}
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
struct page **pagep, void **fsdata)
{
return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ext2_get_block);
+ ext2_get_block_async);
}
static int
* pages in order to make this work easily.
*/
return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ext2_get_block);
+ ext2_get_block_async);
}
static int ext2_nobh_writepage(struct page *page,
*top = 0;
for (k = depth; k > 1 && !offsets[k-1]; k--)
;
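+	/*
+	 * Truncate never retries asynchronously, so a NULL wait queue
+	 * keeps the branch walk synchronous.
+	 */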
- partial = ext2_get_branch(inode, k, offsets, chain, &err);
+ partial = ext2_get_branch(inode, k, offsets, chain, &err, NULL);
if (!partial)
partial = chain + k-1;
/*
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
-struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+struct buffer_head *__bread_async(struct block_device *, sector_t block, unsigned size, struct wait_bit_queue *);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait);
void ll_rw_block(int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
__bforget(bh);
}
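+
+/*
+ * Synchronous wrappers: a NULL wait queue makes the _async variants
+ * block exactly like the originals did.
+ */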
+static inline struct buffer_head *__bread(struct block_device *bdev,
+ sector_t block, unsigned size)
+{
+ return __bread_async(bdev, block, size, NULL);
+}
+
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
- return __bread(sb->s_bdev, block, sb->s_blocksize);
+ return __bread_async(sb->s_bdev, block, sb->s_blocksize, NULL);
+}
+
+static inline struct buffer_head *
+sb_bread_async(struct super_block *sb, sector_t block,
+	       struct wait_bit_queue *wait)
+{
+	return __bread_async(sb->s_bdev, block, sb->s_blocksize, wait);
}
static inline void
__lock_buffer(bh);
}
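+
+/*
+ * Trylock first; on contention either use the caller-supplied @wait
+ * (async retry via -EIOCBRETRY) or fall back to an on-stack wait
+ * entry, which blocks like plain lock_buffer().
+ */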
+static inline int lock_buffer_async(struct buffer_head *bh,
+ struct wait_bit_queue *wait)
+{
+ if (!trylock_buffer(bh)) {
+ DEFINE_WAIT_BIT(wq_stack, &bh->b_state, BH_Lock);
+
+ if (!wait)
+ wait = &wq_stack;
+ else {
+ wait->key.flags = &bh->b_state;
+ wait->key.bit_nr = BH_Lock;
+ }
+
+ return __lock_buffer_async(bh, wait);
+ }
+
+ return 0;
+}
+
extern int __set_page_dirty_buffers(struct page *page);
#else /* CONFIG_BLOCK */