mm: fix missed wakeup on -EINTR return from __lock_page_async() (aio-buffered)
authorJens Axboe <jens.axboe@oracle.com>
Sat, 26 Sep 2009 15:44:13 +0000 (17:44 +0200)
committerJens Axboe <jens.axboe@oracle.com>
Sat, 26 Sep 2009 15:44:13 +0000 (17:44 +0200)
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
include/linux/pagemap.h
mm/filemap.c

index 0d154a78944125d7ca1388ba76cac99e1daf7f03..2978c73694dbbd39a8361546cccd3b44e695692c 100644 (file)
@@ -290,7 +290,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void __lock_page_nosync(struct page *page);
-extern int __lock_page_async(struct page *page, struct wait_bit_queue *);
+extern int __lock_page_async(struct page *page, struct wait_bit_queue *, int);
 extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
@@ -329,18 +329,8 @@ static inline void lock_page_nosync(struct page *page)
  */
 static inline int lock_page_async(struct page *page, struct wait_bit_queue *wq)
 {
-       if (!trylock_page(page)) {
-               DEFINE_WAIT_BIT(wq_stack, &page->flags, PG_locked);
-               
-               if (!wq)
-                       wq = &wq_stack;
-               else {
-                       wq->key.flags = &page->flags;
-                       wq->key.bit_nr = PG_locked;
-               }
-
-               return __lock_page_async(page, wq);
-       }
+       if (!trylock_page(page))
+               return __lock_page_async(page, wq, TASK_KILLABLE);
 
        return 0;
 }
@@ -351,7 +341,9 @@ static inline int lock_page_async(struct page *page, struct wait_bit_queue *wq)
 static inline void lock_page(struct page *page)
 {
        WARN_ON(in_aio(current));
-       lock_page_async(page, NULL);
+
+       if (!trylock_page(page))
+               __lock_page_async(page, NULL, TASK_UNINTERRUPTIBLE);
 }
 
 /*
index 26348d3bdcb4b3721df66c3c38d6e057655ccaf4..2ba67b06dd2b7dbfcdbe4c7676d53056e64e786c 100644 (file)
@@ -535,21 +535,18 @@ int wait_on_page_bit_async(struct page *page, int bit_nr,
                DEFINE_WAIT_BIT(stack_wait, &page->flags, bit_nr);
                int (*fn)(void *) = sync_page;
 
-               if (!wait) {
-                       fn = sync_page;
+               if (!wait)
                        wait = &stack_wait;
-               } else {
-                       fn = sync_page_killable;
+               else {
                        wait->key.flags = &page->flags;
                        wait->key.bit_nr = bit_nr;
+                       fn = sync_page_killable;
                }
 
                ret = __wait_on_bit(page_waitqueue(page), wait, fn,
                                                        TASK_UNINTERRUPTIBLE);
        }
 
-       if (ret)
-               printk("%s: ret=%d\n", __FUNCTION__, ret);
        return ret;
 }
 EXPORT_SYMBOL(wait_on_page_bit_async);
@@ -626,16 +623,35 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-int __lock_page_async(struct page *page, struct wait_bit_queue *wq)
+int __lock_page_async(struct page *page, struct wait_bit_queue *wq, int mode)
 {
+       DEFINE_WAIT_BIT(wq_stack, &page->flags, PG_locked);
        int (*fn)(void *) = sync_page;
+       int ret;
+
+       if (!wq)
+               wq = &wq_stack;
+       else {
+               wq->key.flags = &page->flags;
+               wq->key.bit_nr = PG_locked;
+       }
 
-       if (!is_sync_wait_bit_queue(wq))
+       if (!is_sync_wait_bit_queue(wq) || mode == TASK_KILLABLE)
                fn = sync_page_killable;
 
-       return __wait_on_bit_lock(page_waitqueue(page), wq, fn,
-                                                       TASK_UNINTERRUPTIBLE);
- }
+       /*
+        * wait_on_bit_lock uses prepare_to_wait_exclusive, so if multiple
+        * procs were waiting on this page, we were the only proc woken up.
+        *
+        * if ret != 0, we didn't actually get the lock.  We need to
+        * make sure any other waiters don't sleep forever.
+        */
+       ret = __wait_on_bit_lock(page_waitqueue(page), wq, fn, mode);
+       if (ret == -EINTR)
+               wake_up_page(page, PG_locked);
+
+       return ret;
+}
 EXPORT_SYMBOL(__lock_page_async);
 
 /**