From 3001eabbd761e5c49bcb5726100449b86972927d Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sat, 26 Sep 2009 17:23:29 +0200
Subject: [PATCH] aio: async page waiting

Signed-off-by: Jens Axboe
---
 include/linux/pagemap.h | 20 ++++++++++++++++++
 include/linux/wait.h    |  5 +++++
 kernel/wait.c           | 21 ++++++++++++++-----
 mm/filemap.c            | 45 ++++++++++++++++++++++++++++++++++++-----
 4 files changed, 81 insertions(+), 10 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ed5d7501e181..065cb319b3db 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -290,6 +290,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
+extern int __lock_page_wq(struct page *page, struct wait_bit_queue *);
 extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
@@ -340,6 +341,14 @@ static inline void lock_page_nosync(struct page *page)
 	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
+
+static inline int lock_page_wq(struct page *page, struct wait_bit_queue *wq)
+{
+	if (!trylock_page(page))
+		return __lock_page_wq(page, wq);
+
+	return 0;
+}
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
@@ -360,6 +369,17 @@ static inline void wait_on_page_locked(struct page *page)
 		wait_on_page_bit(page, PG_locked);
 }
 
+extern int wait_on_page_bit_wq(struct page *, int, struct wait_bit_queue *);
+
+static inline int wait_on_page_locked_wq(struct page *page,
+					 struct wait_bit_queue *wait)
+{
+	if (PageLocked(page))
+		return wait_on_page_bit_wq(page, PG_locked, wait);
+
+	return 0;
+}
+
 /*
  * Wait for a page to complete writeback
  */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..87448bd312fe 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -403,6 +403,11 @@ do {									\
 	__ret;								\
 })
 
+static inline int is_sync_wait(wait_queue_t *wait)
+{
+	return wait->private != NULL;
+}
+
 /*
  * Must be called with the spinlock in the wait_queue_head_t held.
  */
diff --git a/kernel/wait.c b/kernel/wait.c
index c4bd3d825f35..ca0b832d5ff3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -73,7 +73,8 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
-	set_current_state(state);
+	if (is_sync_wait(wait))
+		set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait);
@@ -87,7 +88,8 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
-	set_current_state(state);
+	if (is_sync_wait(wait))
+		set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
@@ -198,8 +200,13 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 
 	do {
 		prepare_to_wait(wq, &q->wait, mode);
-		if (test_bit(q->key.bit_nr, q->key.flags))
+		if (test_bit(q->key.bit_nr, q->key.flags)) {
 			ret = (*action)(q->key.flags);
+			if (ret)
+				break;
+		}
+		if (!is_sync_wait(&q->wait))
+			ret = -EIOCBRETRY;
 	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
 	finish_wait(wq, &q->wait);
 	return ret;
@@ -227,8 +234,12 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 		if (!test_bit(q->key.bit_nr, q->key.flags))
 			continue;
 		ret = action(q->key.flags);
-		if (!ret)
-			continue;
+		if (!ret) {
+			if (!is_sync_wait(&q->wait))
+				ret = -EIOCBRETRY;
+			else
+				continue;
+		}
 		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
 		return ret;
 	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
diff --git a/mm/filemap.c b/mm/filemap.c
index 6c84e598b4a9..9e2fc115166b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -152,7 +152,7 @@ void remove_from_page_cache(struct page *page)
 	mem_cgroup_uncharge_cache_page(page);
 }
 
-static int sync_page(void *word)
+static int sync_page_no_sched(void *word)
 {
 	struct address_space *mapping;
 	struct page *page;
@@ -184,10 +184,18 @@ static int sync_page(void *word)
 	mapping = page_mapping(page);
 	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
 		mapping->a_ops->sync_page(page);
-	io_schedule();
+
 	return 0;
 }
 
+static int sync_page(void *word)
+{
+	int ret = sync_page_no_sched(word);
+
+	io_schedule();
+	return ret;
+}
+
 static int sync_page_killable(void *word)
 {
 	sync_page(word);
@@ -519,16 +527,36 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
+int wait_on_page_bit_wq(struct page *page, int bit_nr,
+			struct wait_bit_queue *wq)
+{
+	int ret = 0;
+
+	if (test_bit(bit_nr, &page->flags)) {
+		int (*fn)(void *) = sync_page;
+
+		if (!is_sync_wait(&wq->wait)) {
+			fn = sync_page_no_sched;
+			ret = -EIOCBRETRY;
+		}
+
+		__wait_on_bit(page_waitqueue(page), wq, fn,
+			      TASK_UNINTERRUPTIBLE);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(wait_on_page_bit_wq);
+
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
-	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
-			TASK_UNINTERRUPTIBLE);
+	wait_on_page_bit_wq(page, bit_nr, &wait);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
+
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
@@ -585,6 +613,13 @@ void end_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL(end_page_writeback);
 
+int __lock_page_wq(struct page *page, struct wait_bit_queue *wq)
+{
+	return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_no_sched,
+				  TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_page_wq);
+
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
-- 
2.25.1
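
Note (not part of the patch): the sketch below illustrates how a caller might drive the new interface asynchronously. The helper try_lock_page_async() and the wake callback aio_wake_function() are hypothetical names for illustration; only lock_page_wq(), is_sync_wait() and the -EIOCBRETRY convention come from the patch itself. Because init_waitqueue_func_entry() leaves ->private NULL, is_sync_wait() reports an async waiter, so the lock attempt returns -EIOCBRETRY instead of sleeping and the queued wait entry is later woken through the callback when PG_locked clears.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/wait.h>

/* Hypothetical wake callback: real code would kick the owning iocb here. */
static int aio_wake_function(wait_queue_t *wait, unsigned mode, int sync,
			     void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wbq =
		container_of(wait, struct wait_bit_queue, wait);

	/* Ignore wakeups for other pages/bits hashed to the same queue. */
	if (wbq->key.flags != key->flags || wbq->key.bit_nr != key->bit_nr)
		return 0;

	list_del_init(&wait->task_list);
	/* ...queue the owning iocb for retry here... */
	return 1;
}

/* Hypothetical helper: try to lock a page without blocking the caller. */
static int try_lock_page_async(struct page *page, struct wait_bit_queue *wbq)
{
	wbq->key.flags = &page->flags;
	wbq->key.bit_nr = PG_locked;
	/* Leaves ->private NULL, so is_sync_wait() sees an async waiter. */
	init_waitqueue_func_entry(&wbq->wait, aio_wake_function);
	INIT_LIST_HEAD(&wbq->wait.task_list);

	/* 0 if the page is now locked, -EIOCBRETRY if the caller must retry. */
	return lock_page_wq(page, wbq);
}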