bd = bh->b_bdev;
if (bd)
blk_run_address_space(bd->bd_inode->i_mapping);
- io_schedule();
+ if (!in_aio(current))
+ io_schedule();
return 0;
}
}
EXPORT_SYMBOL(__wait_on_buffer);
+/*
+ * Async counterpart of __wait_on_buffer(): wait for BH_Lock to clear.
+ * If @wait is non-NULL, the caller's wait_bit_queue is queued instead of
+ * blocking and a non-zero value (presumably -EIOCBRETRY — confirm in
+ * __wait_on_bit()) is returned so the caller can retry later.
+ */
+int __wait_on_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait)
+{
+ return wait_on_bit_async(&bh->b_state, BH_Lock, sync_buffer,
+ TASK_UNINTERRUPTIBLE, wait);
+}
+EXPORT_SYMBOL(__wait_on_buffer_async);
+
static void
__clear_page_buffers(struct page *page)
{
* If we issued read requests - let them complete.
*/
while(wait_bh > wait) {
- wait_on_buffer(*--wait_bh);
+ int ret;
+
+ ret = wait_on_buffer_async(*--wait_bh, current->io_wait);
+ if (ret && !err)
+ err = ret;
+ /*
+ * A pending async retry (-EIOCBRETRY) leaves the buffer
+ * locked and hence not uptodate -- don't clobber the retry
+ * code with a hard -EIO.
+ */
- if (!buffer_uptodate(*wait_bh))
+ if (!err && !buffer_uptodate(*wait_bh))
err = -EIO;
}
* for the buffer_head refcounts.
*/
for (bh = head; bh; bh = bh->b_this_page) {
- wait_on_buffer(bh);
+ int err;
+
+ err = wait_on_buffer_async(bh, current->io_wait);
+ if (err && !ret)
+ ret = err;
+ /*
+ * A pending async retry (-EIOCBRETRY) leaves the buffer
+ * locked and hence not uptodate -- don't clobber the retry
+ * code with a hard -EIO.
+ */
- if (!buffer_uptodate(bh))
+ if (!ret && !buffer_uptodate(bh))
ret = -EIO;
}
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
- err = -EIO;
ll_rw_block(READ, 1, &bh);
- wait_on_buffer(bh);
+ err = wait_on_buffer_async(bh, current->io_wait);
+ /*
+ * Propagate -EIOCBRETRY (or any other wait error) instead of
+ * unconditionally overwriting it with -EIO below; the old
+ * placement also left err == -EIO on the success path.
+ */
+ if (err)
+ goto unlock;
/* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
+ err = -EIO;
goto unlock;
+ }
}
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ, bh);
- wait_on_buffer(bh);
+ /*
+ * Return the wait's actual error code (async callers get
+ * -EIOCBRETRY from the wait machinery) rather than hardcoding
+ * it; the reference taken by get_bh() above is dropped by
+ * end_buffer_read_sync() when the I/O completes.
+ */
+ {
+ int ret = wait_on_buffer_async(bh, current->io_wait);
+
+ if (ret)
+ return ret;
+ }
if (buffer_uptodate(bh))
return 0;
return -EIO;
void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
+int __wait_on_buffer_async(struct buffer_head *, struct wait_bit_queue *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
__wait_on_buffer(bh);
}
+/*
+ * Async version of wait_on_buffer(): returns non-zero instead of
+ * blocking when the buffer is locked and @wait has been queued.
+ * The b_count == 0 test mirrors wait_on_buffer(): waiting on a
+ * zero-reference buffer is a likely bug we still want to trap in
+ * __wait_on_buffer_async().
+ */
+static inline int wait_on_buffer_async(struct buffer_head *bh,
+ struct wait_bit_queue *wait)
+{
+ if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
+ return __wait_on_buffer_async(bh, wait);
+
+ return 0;
+}
+
static inline int trylock_buffer(struct buffer_head *bh)
{
return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
*/
static inline int lock_page_async(struct page *page, struct wait_bit_queue *wq)
{
- might_sleep();
-
+ /*
+ * NOTE(review): @wq appears unused here and wq_stack below is never
+ * handed to the wait -- the synchronous wait_on_page_bit() is called,
+ * so this path still blocks despite the removed might_sleep().
+ * Confirm whether this should use
+ * wait_on_page_bit_async(page, PG_locked, wq) instead.
+ */
if (!trylock_page(page)) {
DEFINE_WAIT_BIT(wq_stack, &page->flags, PG_locked);
wait_on_page_bit(page, PG_locked);
}
-extern int wait_on_page_bit_wq(struct page *, int, struct wait_bit_queue *);
+extern int wait_on_page_bit_async(struct page *page, int bit_nr,
+ struct wait_bit_queue *);
-static inline int wait_on_page_locked_wq(struct page *page,
- struct wait_bit_queue *wait)
+static inline int wait_on_page_locked_async(struct page *page,
+ struct wait_bit_queue *wait)
{
if (PageLocked(page))
- return wait_on_page_bit_wq(page, PG_locked, wait);
+ return wait_on_page_bit_async(page, PG_locked, wait);
return 0;
}
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
-int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
-int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned, struct wait_bit_queue *);
+int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned, struct wait_bit_queue *);
wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
{
if (!test_bit(bit, word))
return 0;
- return out_of_line_wait_on_bit(word, bit, action, mode);
+ return out_of_line_wait_on_bit(word, bit, action, mode, NULL);
+}
+
+/*
+ * wait_on_bit_async - like wait_on_bit(), but waits with the caller's
+ * @wait entry so an async caller can be woken via its own queue entry
+ * instead of sleeping; may return non-zero (presumably -EIOCBRETRY
+ * from the wait machinery -- confirm) when the wait is queued.
+ */
+static inline int wait_on_bit_async(void *word, int bit,
+ int (*action)(void *), unsigned mode,
+ struct wait_bit_queue *wait)
+{
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit, action, mode, wait);
+}
/**
{
if (!test_and_set_bit(bit, word))
return 0;
- return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+ return out_of_line_wait_on_bit_lock(word, bit, action, mode, NULL);
+}
+
+/*
+ * wait_on_bit_lock_async - like wait_on_bit_lock(), but waits with the
+ * caller's @wait entry so an async caller can be woken via its own
+ * queue entry instead of sleeping; may return non-zero (presumably
+ * -EIOCBRETRY -- confirm) when the wait is queued, in which case the
+ * bit has NOT been acquired.
+ */
+static inline int wait_on_bit_lock_async(void *word, int bit,
+ int (*action)(void *), unsigned mode,
+ struct wait_bit_queue *wait)
+{
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, action, mode, wait);
+}
#endif /* __KERNEL__ */
EXPORT_SYMBOL(__wait_on_bit);
int __sched out_of_line_wait_on_bit(void *word, int bit,
- int (*action)(void *), unsigned mode)
+ int (*action)(void *), unsigned mode,
+ struct wait_bit_queue *wait)
{
wait_queue_head_t *wq = bit_waitqueue(word, bit);
- DEFINE_WAIT_BIT(wait, word, bit);
+ DEFINE_WAIT_BIT(stack_wait, word, bit);
- return __wait_on_bit(wq, &wait, action, mode);
+
+ /*
+ * Sync callers pass a NULL @wait and use the on-stack entry;
+ * async callers supply their own entry, whose key must be
+ * pointed at this word/bit before waiting.
+ */
+ if (!wait) {
+ wait = &stack_wait;
+ } else {
+ wait->key.flags = word;
+ wait->key.bit_nr = bit;
+ }
+
+ return __wait_on_bit(wq, wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
EXPORT_SYMBOL(__wait_on_bit_lock);
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
- int (*action)(void *), unsigned mode)
+ int (*action)(void *), unsigned mode,
+ struct wait_bit_queue *wait)
{
wait_queue_head_t *wq = bit_waitqueue(word, bit);
- DEFINE_WAIT_BIT(wait, word, bit);
+ DEFINE_WAIT_BIT(stack_wait, word, bit);
+
+ /*
+ * Sync callers pass a NULL @wait and use the on-stack entry;
+ * async callers supply their own entry, whose key must be
+ * pointed at this word/bit before waiting.
+ */
+ if (!wait) {
+ wait = &stack_wait;
+ } else {
+ wait->key.flags = word;
+ wait->key.bit_nr = bit;
+ }
- return __wait_on_bit_lock(wq, &wait, action, mode);
+ return __wait_on_bit_lock(wq, wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
-int wait_on_page_bit_wq(struct page *page, int bit_nr,
- struct wait_bit_queue *wq)
+int wait_on_page_bit_async(struct page *page, int bit_nr,
+ struct wait_bit_queue *wait)
{
int ret = 0;
if (test_bit(bit_nr, &page->flags)) {
+ DEFINE_WAIT_BIT(stack_wait, &page->flags, bit_nr);
int (*fn)(void *) = sync_page;
+ /*
+ * Sync waiters (wait == NULL) use the on-stack entry and
+ * plain sync_page (fn already defaults to it -- the extra
+ * re-assignment was redundant); async waiters bring their
+ * own entry and use sync_page_killable -- confirm the
+ * killable variant is intended for the async path.
+ */
- if (!is_sync_wait(&wq->wait)) {
+ if (!wait) {
+ wait = &stack_wait;
+ } else {
fn = sync_page_killable;
- ret = -EIOCBRETRY;
+ wait->key.flags = &page->flags;
+ wait->key.bit_nr = bit_nr;
}
- __wait_on_bit(page_waitqueue(page), wq, fn,
+ ret = __wait_on_bit(page_waitqueue(page), wait, fn,
TASK_UNINTERRUPTIBLE);
}
return ret;
}
-EXPORT_SYMBOL(wait_on_page_bit_wq);
+EXPORT_SYMBOL(wait_on_page_bit_async);
void wait_on_page_bit(struct page *page, int bit_nr)
{
- DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
-
- wait_on_page_bit_wq(page, bit_nr, &wait);
+ /* synchronous wait: NULL makes the async helper use its on-stack entry */
+ wait_on_page_bit_async(page, bit_nr, NULL);
}
EXPORT_SYMBOL(wait_on_page_bit);
void *data)
{
struct page *page;
+ int err;
page = read_cache_page_async(mapping, index, filler, data);
if (IS_ERR(page))
goto out;
- wait_on_page_locked(page);
+ err = wait_on_page_locked_async(page, current->io_wait);
+ if (err) {
+ page = ERR_PTR(err);
+ goto out;
+ }
if (!PageUptodate(page)) {
page_cache_release(page);
page = ERR_PTR(-EIO);