aio: buffered cleanups
author	Jens Axboe <jens.axboe@oracle.com>
Sat, 26 Sep 2009 15:30:26 +0000 (17:30 +0200)
committer	Jens Axboe <jens.axboe@oracle.com>
Sat, 26 Sep 2009 15:30:26 +0000 (17:30 +0200)
- Get rid of lock_page_killable() and lock_page_wq(), unifying them
  into a single lock_page_async().
- Get rid of the special sync_page_no_sched(); instead, use a helper to
  check whether the process has registered a special (async) wait queue.
  Callers convert as sketched below.
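
  A sketch of the caller conversion, going by the commit message (the
  lock_page_killable() side is implied here rather than shown in the
  diff below):

	-	error = lock_page_killable(page);
	+	error = lock_page_async(page);

	-	error = lock_page_wq(page, current->io_wait);
	+	error = lock_page_async(page);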

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
include/linux/pagemap.h
include/linux/wait.h
mm/filemap.c

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 6f0b88c9a3e06c8fdf8e97a96c225d61c1a6d936..b1a73cc1f23fd7f8141612b447ca6e4f75077a27 100644
@@ -288,9 +288,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
-extern int __lock_page_wq(struct page *page, struct wait_bit_queue *);
+extern int __lock_page_async(struct page *page, struct wait_bit_queue *);
 extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
@@ -318,19 +317,6 @@ static inline void lock_page(struct page *page)
                __lock_page(page);
 }
 
-/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals.  It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
- */
-static inline int lock_page_killable(struct page *page)
-{
-       might_sleep();
-       if (!trylock_page(page))
-               return __lock_page_killable(page);
-       return 0;
-}
-
 /*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
@@ -342,15 +328,27 @@ static inline void lock_page_nosync(struct page *page)
                __lock_page_nosync(page);
 }
 
-static inline int lock_page_wq(struct page *page, struct wait_bit_queue *wq)
+/*
+ * This is like lock_page(), except that it won't schedule away waiting
+ * for the page to be unlocked by IO. If the current process has
+ * registered an "async" wait queue, it registers a callback there and
+ * returns -EIOCBRETRY instead of sleeping; if not, it just blocks like
+ * lock_page(). In either case it may return -EINTR if a fatal signal
+ * terminates the wait before the page has been locked.
+ */
+static inline int lock_page_async(struct page *page)
 {
+       might_sleep();
+
        if (!trylock_page(page)) {
+               struct wait_bit_queue *wq;
                DEFINE_WAIT_BIT(wq_stack, &page->flags, PG_locked);
                
+               wq = current->io_wait;
                if (!wq)
                        wq = &wq_stack;
 
-               return __lock_page_wq(page, wq);
+               return __lock_page_async(page, wq);
        }
 
        return 0;
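
(Editorial sketch, not part of the patch: how an async caller is expected
to drive lock_page_async(). Installing current->io_wait is assumed to be
the job of the AIO retry machinery; the entry and wake-callback names
below are hypothetical.)

	struct wait_bit_queue iocb_wq = {
		.wait = {
			.private = NULL,		/* NULL private marks the wait as async */
			.func	 = aio_wake_function,	/* hypothetical: kicks the iocb retry */
		},
	};

	current->io_wait = &iocb_wq;
	error = lock_page_async(page);	/* -EIOCBRETRY if the page was locked */
	current->io_wait = NULL;
	if (error == -EIOCBRETRY)
		return error;	/* the iocb is retried when the page unlocks */
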
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 87448bd312fe3b987d68f830b069c4dd95b5eb5e..d6a073f3c3e0c7d26b2ad33551fed6979a1ce170 100644
@@ -405,9 +405,16 @@ do {                                                                       \
 
 static inline int is_sync_wait(wait_queue_t *wait)
 {
-       return wait->private != NULL;
+       return !wait || wait->private != NULL;
 }
 
+static inline int is_sync_wait_bit_queue(struct wait_bit_queue *wq)
+{
+       return !wq || is_sync_wait(&wq->wait);
+}
+
+#define in_aio(tsk)    (!is_sync_wait_bit_queue((tsk)->io_wait))
+
 /*
  * Must be called with the spinlock in the wait_queue_head_t held.
  */
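
(Editorial sketch, not part of the patch: what the sync/async distinction
keys off. A waiter set up with DEFINE_WAIT_BIT() stores the task in
->wait.private and is therefore "sync"; an async entry leaves it NULL, so
in_aio() is true while current->io_wait points at one. iocb_wq is the
hypothetical async entry from the sketch above; sync_page() is in
mm/filemap.c below.)

	DEFINE_WAIT_BIT(sync_wait, &page->flags, PG_locked);
	/* sync_wait.wait.private == current, so is_sync_wait() is true */

	current->io_wait = NULL;	/* no async context registered */
	/* in_aio(current) is false: sync_page() will io_schedule() */

	current->io_wait = &iocb_wq;	/* async entry, .wait.private == NULL */
	/* in_aio(current) is true: sync_page() returns without sleeping */
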
diff --git a/mm/filemap.c b/mm/filemap.c
index 1669d644a54458cf415ed1d3c288fb8e986d47a8..50a494f3ee86e1396c94cfe76ed3a3a66f44b195 100644
@@ -152,7 +152,7 @@ void remove_from_page_cache(struct page *page)
        mem_cgroup_uncharge_cache_page(page);
 }
 
-static int sync_page_no_sched(void *word)
+static int sync_page(void *word)
 {
        struct address_space *mapping;
        struct page *page;
@@ -185,21 +185,20 @@ static int sync_page_no_sched(void *word)
        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
                mapping->a_ops->sync_page(page);
 
+       if (!in_aio(current))
+               io_schedule();
+
        return 0;
 }
 
-static int sync_page(void *word)
+static int sync_page_killable(void *word)
 {
-       int ret = sync_page_no_sched(word);
+       int ret = sync_page(word);
 
-       io_schedule();
-       return ret;
-}
+       if (!ret && fatal_signal_pending(current))
+               ret = -EINTR;
 
-static int sync_page_killable(void *word)
-{
-       sync_page(word);
-       return fatal_signal_pending(current) ? -EINTR : 0;
+       return ret;
 }
 
 /**
@@ -536,7 +535,7 @@ int wait_on_page_bit_wq(struct page *page, int bit_nr,
                int (*fn)(void *) = sync_page;
 
                if (!is_sync_wait(&wq->wait)) {
-                       fn = sync_page_no_sched;
+                       fn = sync_page_killable;
                        ret = -EIOCBRETRY;
                }
 
@@ -613,18 +612,6 @@ void end_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL(end_page_writeback);
 
-int __lock_page_wq(struct page *page, struct wait_bit_queue *wq)
-{
-       if (wq) {
-               wq->key.flags = &page->flags;
-               wq->key.bit_nr = PG_locked;
-       }
-
-       return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_no_sched,
-                                                       TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(__lock_page_wq);
-
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
@@ -643,14 +630,17 @@ void __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int __lock_page_killable(struct page *page)
+int __lock_page_async(struct page *page, struct wait_bit_queue *wq)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       if (wq) {
+               wq->key.flags = &page->flags;
+               wq->key.bit_nr = PG_locked;
+       }
 
-       return __wait_on_bit_lock(page_waitqueue(page), &wait,
-                                       sync_page_killable, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+       return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_killable,
+                                                       TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_page_async);
 
 /**
  * __lock_page_nosync - get a lock on the page, without calling sync_page()
@@ -1141,7 +1131,7 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               error = lock_page_wq(page, current->io_wait);
+               error = lock_page_async(page);
                if (unlikely(error))
                        goto readpage_error;
 
@@ -1172,7 +1162,7 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       error = lock_page_wq(page, current->io_wait);
+                       error = lock_page_async(page);
                        if (unlikely(error))
                                goto readpage_error;
                        if (!PageUptodate(page)) {