bh->b_end_io = handler;
bh->b_private = private;
}
+EXPORT_SYMBOL(init_buffer);
static int sync_buffer(void *word)
{
struct block_device *bd;
struct buffer_head *bh
	= container_of(word, struct buffer_head, b_state);

smp_mb();
bd = bh->b_bdev;
if (bd)
blk_run_address_space(bd->bd_inode->i_mapping);
- io_schedule();
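+ /* async callers must not block here; the queued wait retries them */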
+ if (!in_aio(current))
+ io_schedule();
return 0;
}
}
EXPORT_SYMBOL(__lock_buffer);
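+
+/*
+ * Async variant of __lock_buffer(): instead of sleeping on a contended
+ * buffer, queue @wait and return nonzero (-EIOCBRETRY) so the caller
+ * can back out and retry once the lock is released.
+ */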
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+ return wait_on_bit_lock_async(&bh->b_state, BH_Lock, sync_buffer,
+ TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__lock_buffer_async);
+
void unlock_buffer(struct buffer_head *bh)
{
clear_bit_unlock(BH_Lock, &bh->b_state);
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
}
+EXPORT_SYMBOL(unlock_buffer);
/*
* Block until a buffer comes unlocked. This doesn't stop it
{
wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
+EXPORT_SYMBOL(__wait_on_buffer);
+
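+/*
+ * Async variant of __wait_on_buffer(): instead of sleeping on a locked
+ * buffer, queue @wait and return nonzero (-EIOCBRETRY) to be retried.
+ */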
+int __wait_on_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+ return wait_on_bit_async(&bh->b_state, BH_Lock, sync_buffer,
+ TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__wait_on_buffer_async);
static void
__clear_page_buffers(struct page *page)
__end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
}
+EXPORT_SYMBOL(end_buffer_read_sync);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
unlock_buffer(bh);
put_bh(bh);
}
+EXPORT_SYMBOL(end_buffer_write_sync);
/*
* Various filesystems appear to want __find_get_block to be non-blocking.
invalidate_bh_lrus();
invalidate_mapping_pages(mapping, 0, -1);
}
+EXPORT_SYMBOL(invalidate_bdev);
/*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
*/
static void free_more_memory(void)
{
local_irq_restore(flags);
return;
}
+EXPORT_SYMBOL(end_buffer_async_write);
/*
* If a page's buffers are under async read-in (end_buffer_async_read
set_buffer_async_read(bh);
}
-void mark_buffer_async_write_endio(struct buffer_head *bh,
- bh_end_io_t *handler)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+ bh_end_io_t *handler)
{
bh->b_end_io = handler;
set_buffer_async_write(bh);
return err;
}
-void do_thaw_all(struct work_struct *work)
+static void do_thaw_all(struct work_struct *work)
{
struct super_block *sb;
char b[BDEVNAME_SIZE];
}
}
}
+EXPORT_SYMBOL(mark_buffer_dirty);
/*
* Decrement a buffer_head's reference count. If all buffers against a page
}
WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
+EXPORT_SYMBOL(__brelse);
/*
* bforget() is like brelse(), except it discards any
}
__brelse(bh);
}
+EXPORT_SYMBOL(__bforget);
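+/*
+ * Slow path of __bread_async(): lock the buffer, issue the read and
+ * wait for completion. In async context either wait may bail with
+ * -EIOCBRETRY, which is propagated as ERR_PTR(-EIOCBRETRY).
+ */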
-static struct buffer_head *__bread_slow(struct buffer_head *bh)
+static struct buffer_head *__bread_slow(struct buffer_head *bh,
+ struct wait_bit_queue *wait)
{
- lock_buffer(bh);
+ if (lock_buffer_async(bh, wait))
+ return ERR_PTR(-EIOCBRETRY);
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
return bh;
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ, bh);
- wait_on_buffer(bh);
+ if (wait_on_buffer_async(bh, wait))
+ return ERR_PTR(-EIOCBRETRY);
if (buffer_uptodate(bh))
return bh;
}
* It returns NULL if the block was unreadable.
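+ * In async context it may instead return ERR_PTR(-EIOCBRETRY), in
+ * which case the caller must retry the operation later.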
*/
struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+__bread_async(struct block_device *bdev, sector_t block, unsigned size,
+ struct wait_bit_queue *wait)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh) && !buffer_uptodate(bh))
- bh = __bread_slow(bh);
+ bh = __bread_slow(bh, wait);
return bh;
}
-EXPORT_SYMBOL(__bread);
+EXPORT_SYMBOL(__bread_async);
/*
* invalidate_bh_lrus() is called rarely - but not only at unmount.
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
- * activity, but those code paths have their own higher-level
- * throttling.
+ * potentially cause a busy-wait loop from writeback threads
+ * and kswapd activity, but those code paths have their own
+ * higher-level throttling.
*/
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
lock_buffer(bh);
* If we issued read requests - let them complete.
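+ * In async context the wait is queued on current->io_wait and may
+ * return -EIOCBRETRY instead of sleeping.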
*/
while (wait_bh > wait) {
- wait_on_buffer(*--wait_bh);
+ int ret;
+
+ ret = wait_on_buffer_async(*--wait_bh, current->io_wait);
+ if (ret && !err) {
+ WARN(1, "%s: ret\n", __FUNCTION__);
+ err = ret;
+ }
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
}
return 0;
}
+EXPORT_SYMBOL(block_read_full_page);
/* utility function for filesystems that need to do work on expanding
* truncates. Uses filesystem pagecache writes to allow the filesystem to
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
- unsigned long limit;
int err;
- err = -EFBIG;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && size > (loff_t)limit) {
- send_sig(SIGXFSZ, current, 0);
- goto out;
- }
- if (size > inode->i_sb->s_maxbytes)
+ err = inode_newsize_ok(inode, size);
+ if (err)
goto out;
err = pagecache_write_begin(NULL, mapping, size, 0,
out:
return err;
}
+EXPORT_SYMBOL(generic_cont_expand_simple);
static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
out:
return err;
}
+EXPORT_SYMBOL(cont_write_begin);
int block_prepare_write(struct page *page, unsigned from, unsigned to,
get_block_t *get_block)
ClearPageUptodate(page);
return err;
}
+EXPORT_SYMBOL(block_prepare_write);
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
__block_commit_write(inode, page, from, to);
return 0;
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
out:
return ret;
}
+EXPORT_SYMBOL(block_page_mkwrite);
/*
* nobh_write_begin()'s prereads are special: the buffer_heads are freed
* for the buffer_head refcounts.
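+ * In async context these waits may return -EIOCBRETRY via
+ * current->io_wait rather than sleeping.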
*/
for (bh = head; bh; bh = bh->b_this_page) {
- wait_on_buffer(bh);
+ int err;
+
+ err = wait_on_buffer_async(bh, current->io_wait);
+ if (err && !ret) {
+ WARN(1, "%s: ret\n", __FUNCTION__);
+ ret = err;
+ }
if (!buffer_uptodate(bh))
ret = -EIO;
}
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
- err = -EIO;
ll_rw_block(READ, 1, &bh);
- wait_on_buffer(bh);
+ err = wait_on_buffer_async(bh, current->io_wait);
+ if (err) {
+ WARN(1, "err=%d\n", err);
+ goto out;
+ }
/* Uhhuh. Read error. Complain and punt. */
+ err = -EIO;
if (!buffer_uptodate(bh))
goto unlock;
}
out:
return err;
}
+EXPORT_SYMBOL(block_truncate_page);
/*
* The generic ->writepage function for buffer-backed address_spaces
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
return __block_write_full_page(inode, page, get_block, wbc, handler);
}
+EXPORT_SYMBOL(block_write_full_page_endio);
/*
* The generic ->writepage function for buffer-backed address_spaces
return block_write_full_page_endio(page, get_block, wbc,
end_buffer_async_write);
}
-
+EXPORT_SYMBOL(block_write_full_page);
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
+EXPORT_SYMBOL(generic_block_bmap);
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
bio_put(bio);
return ret;
}
+EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
unlock_buffer(bh);
}
}
+EXPORT_SYMBOL(ll_rw_block);
/*
* For a data-integrity writeout, we need to wait upon any in-progress I/O
}
return ret;
}
+EXPORT_SYMBOL(sync_dirty_buffer);
/*
* try_to_free_buffers() checks if all the buffers on this particular page
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
}
+EXPORT_SYMBOL(block_sync_page);
/*
* There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here.
*
* Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
*/
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
hotcpu_notifier(buffer_cpu_notify, 0);
}
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_page_mkwrite);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(block_write_full_page_endio);
-EXPORT_SYMBOL(cont_write_begin);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(end_buffer_async_write);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_cont_expand_simple);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);