aio: first support for buffered async writes
[linux-block.git] / mm / filemap.c
index 6c84e598b4a9f7a0c2901387f32307c4e96ebaae..26348d3bdcb4b3721df66c3c38d6e057655ccaf4 100644 (file)
@@ -184,14 +184,21 @@ static int sync_page(void *word)
        mapping = page_mapping(page);
        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
                mapping->a_ops->sync_page(page);
-       io_schedule();
+
+       if (!in_aio(current))
+               io_schedule();
+
        return 0;
 }
 
 static int sync_page_killable(void *word)
 {
-       sync_page(word);
-       return fatal_signal_pending(current) ? -EINTR : 0;
+       int ret = sync_page(word);
+
+       if (!ret && fatal_signal_pending(current))
+               ret = -EINTR;
+
+       return ret;
 }
 
 /**
@@ -519,16 +526,40 @@ static inline void wake_up_page(struct page *page, int bit)
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+int wait_on_page_bit_async(struct page *page, int bit_nr,
+                          struct wait_bit_queue *wait)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+       int ret = 0;
+
+       if (test_bit(bit_nr, &page->flags)) {
+               DEFINE_WAIT_BIT(stack_wait, &page->flags, bit_nr);
+               int (*fn)(void *) = sync_page;
+               int state = TASK_UNINTERRUPTIBLE;
 
-       if (test_bit(bit_nr, &page->flags))
-               __wait_on_bit(page_waitqueue(page), &wait, sync_page,
-                                                       TASK_UNINTERRUPTIBLE);
+               if (!wait) {
+                       /* synchronous caller: wait on the stack */
+                       wait = &stack_wait;
+               } else {
+                       /* async caller supplied the wait queue: killable */
+                       fn = sync_page_killable;
+                       state = TASK_KILLABLE;
+                       wait->key.flags = &page->flags;
+                       wait->key.bit_nr = bit_nr;
+               }
+
+               ret = __wait_on_bit(page_waitqueue(page), wait, fn, state);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(wait_on_page_bit_async);
+
+void wait_on_page_bit(struct page *page, int bit_nr)
+{
+       wait_on_page_bit_async(page, bit_nr, NULL);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
@@ -586,7 +618,7 @@ void end_page_writeback(struct page *page)
 EXPORT_SYMBOL(end_page_writeback);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
+ * __lock_page_async - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
  *
  * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
@@ -594,23 +626,20 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void __lock_page(struct page *page)
+int __lock_page_async(struct page *page, struct wait_bit_queue *wq)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
-
-       __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
-                                                       TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(__lock_page);
+       int (*fn)(void *) = sync_page;
+       int state = TASK_UNINTERRUPTIBLE;
 
-int __lock_page_killable(struct page *page)
-{
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       /* async waiters keep the killable semantics of __lock_page_killable */
+       if (!is_sync_wait_bit_queue(wq)) {
+               fn = sync_page_killable;
+               state = TASK_KILLABLE;
+       }
 
-       return __wait_on_bit_lock(page_waitqueue(page), &wait,
-                                       sync_page_killable, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+       return __wait_on_bit_lock(page_waitqueue(page), wq, fn, state);
+}
+EXPORT_SYMBOL(__lock_page_async);
 
 /**
  * __lock_page_nosync - get a lock on the page, without calling sync_page()
@@ -667,24 +693,22 @@ repeat:
 }
 EXPORT_SYMBOL(find_get_page);
 
-/**
- * find_lock_page - locate, pin and lock a pagecache page
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Locates the desired pagecache page, locks it, increments its reference
- * count and returns its address.
- *
- * Returns zero if the page was not present. find_lock_page() may sleep.
- */
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
+struct page *find_lock_page_async(struct address_space *mapping, pgoff_t offset,
+                                 struct wait_bit_queue *wait)
 {
        struct page *page;
 
 repeat:
        page = find_get_page(mapping, offset);
        if (page) {
-               lock_page(page);
+               int ret;
+
+               ret = lock_page_async(page, wait);
+               if (ret) {
+                       page_cache_release(page);
+                       page = ERR_PTR(ret);
+                       goto out;
+               }
                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
@@ -693,8 +717,25 @@ repeat:
                }
                VM_BUG_ON(page->index != offset);
        }
+out:
        return page;
 }
+EXPORT_SYMBOL(find_lock_page_async);
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Locates the desired pagecache page, locks it, increments its reference
+ * count and returns its address.
+ *
+ * Returns zero if the page was not present. find_lock_page() may sleep.
+ */
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
+{
+       return find_lock_page_async(mapping, offset, NULL);
+}
 EXPORT_SYMBOL(find_lock_page);
 
 /**
@@ -1101,9 +1145,9 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               error = lock_page_killable(page);
-               if (unlikely(error))
+               error = lock_page_async(page, current->io_wait);
+               if (unlikely(error))
                        goto readpage_error;
 
 page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
@@ -1132,7 +1178,7 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       error = lock_page_killable(page);
+                       error = lock_page_async(page, current->io_wait);
                        if (unlikely(error))
                                goto readpage_error;
                        if (!PageUptodate(page)) {
@@ -1751,11 +1797,16 @@ struct page *read_cache_page(struct address_space *mapping,
                                void *data)
 {
        struct page *page;
+       int err;
 
        page = read_cache_page_async(mapping, index, filler, data);
        if (IS_ERR(page))
                goto out;
-       wait_on_page_locked(page);
+       err = wait_on_page_locked_async(page, current->io_wait);
+       if (err) {
+               page = ERR_PTR(err);
+               goto out;
+       }
        if (!PageUptodate(page)) {
                page_cache_release(page);
                page = ERR_PTR(-EIO);
@@ -2148,8 +2199,9 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
        if (flags & AOP_FLAG_NOFS)
                gfp_notmask = __GFP_FS;
repeat:
-       page = find_lock_page(mapping, index);
-       if (likely(page))
+       page = find_lock_page_async(mapping, index, current->io_wait);
+       /* non-NULL covers both a locked page and an ERR_PTR to propagate */
+       if (page)
                return page;
 
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);