aio: first support for buffered async writes
[linux-block.git] mm/filemap.c

diff --git a/mm/filemap.c b/mm/filemap.c
index ccea3b665c12571ac32d6d936b3862e103f7b995..26348d3bdcb4b3721df66c3c38d6e057655ccaf4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 /*
  * FIXME: remove all knowledge of the buffer layer from the core VM
  */
-#include <linux/buffer_head.h> /* for generic_osync_inode */
+#include <linux/buffer_head.h> /* for try_to_free_buffers */
 
 #include <asm/mman.h>
 
-
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
@@ -59,7 +58,7 @@
 /*
  * Lock ordering:
  *
- *  ->i_mmap_lock              (vmtruncate)
+ *  ->i_mmap_lock              (truncate_pagecache)
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->mapping->tree_lock
  *
  *  ->task->proc_lock
  *    ->dcache_lock            (proc_pid_lookup)
+ *
+ *  (code doesn't rely on that order, so you could switch it around)
+ *  ->tasklist_lock             (memory_failure, collect_procs_ao)
+ *    ->i_mmap_lock
  */
 
 /*
@@ -120,6 +123,8 @@ void __remove_from_page_cache(struct page *page)
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
+       if (PageSwapBacked(page))
+               __dec_zone_page_state(page, NR_SHMEM);
        BUG_ON(page_mapped(page));
 
        /*
@@ -179,14 +184,21 @@ static int sync_page(void *word)
        mapping = page_mapping(page);
        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
                mapping->a_ops->sync_page(page);
-       io_schedule();
+
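+       /* only block for the I/O when not running in AIO context */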
+       if (!in_aio(current))
+               io_schedule();
+
        return 0;
 }
 
 static int sync_page_killable(void *word)
 {
-       sync_page(word);
-       return fatal_signal_pending(current) ? -EINTR : 0;
+       int ret = sync_page(word);
+
+       if (!ret && fatal_signal_pending(current))
+               ret = -EINTR;
+
+       return ret;
 }
 
 /**
@@ -307,68 +319,24 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 }
 
 /**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode:     target inode
- * @mapping:   target address_space
- * @pos:       beginning offset in pages to write
- * @count:     number of bytes to write
- *
- * Write and wait upon all the pages in the passed range.  This is a "data
- * integrity" operation.  It waits upon in-flight writeout before starting and
- * waiting upon new writeout.  If there was an IO error, return it.
+ * filemap_fdatawait_range - wait for all under-writeback pages to complete in a given range
+ * @mapping: address space structure to wait for
+ * @start:     offset in bytes where the range starts
+ * @end:       offset in bytes where the range ends (inclusive)
  *
- * We need to re-take i_mutex during the generic_osync_inode list walk because
- * it is otherwise livelockable.
- */
-int sync_page_range(struct inode *inode, struct address_space *mapping,
-                       loff_t pos, loff_t count)
-{
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-       int ret;
-
-       if (!mapping_cap_writeback_dirty(mapping) || !count)
-               return 0;
-       ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
-       if (ret == 0) {
-               mutex_lock(&inode->i_mutex);
-               ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-               mutex_unlock(&inode->i_mutex);
-       }
-       if (ret == 0)
-               ret = wait_on_page_writeback_range(mapping, start, end);
-       return ret;
-}
-EXPORT_SYMBOL(sync_page_range);
-
-/**
- * sync_page_range_nolock - write & wait on all pages in the passed range without locking
- * @inode:     target inode
- * @mapping:   target address_space
- * @pos:       beginning offset in pages to write
- * @count:     number of bytes to write
+ * Walk the list of under-writeback pages of the given address space
+ * in the given range and wait for all of them.
  *
- * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
- * as it forces O_SYNC writers to different parts of the same file
- * to be serialised right until io completion.
+ * This is just a simple wrapper so that callers don't have to convert offsets
+ * to page indexes themselves.
  */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-                          loff_t pos, loff_t count)
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start,
+                           loff_t end)
 {
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-       int ret;
-
-       if (!mapping_cap_writeback_dirty(mapping) || !count)
-               return 0;
-       ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
-       if (ret == 0)
-               ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-       if (ret == 0)
-               ret = wait_on_page_writeback_range(mapping, start, end);
-       return ret;
+       return wait_on_page_writeback_range(mapping, start >> PAGE_CACHE_SHIFT,
+                                           end >> PAGE_CACHE_SHIFT);
 }
-EXPORT_SYMBOL(sync_page_range_nolock);
+EXPORT_SYMBOL(filemap_fdatawait_range);
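The new helper takes byte offsets directly, so a caller that used to open-code the page-index conversion (as the removed sync_page_range() did) can express a data-integrity flush of one range along the lines of the sketch below. This is illustrative only and not part of the patch; flush_byte_range() is a made-up name:

static int flush_byte_range(struct address_space *mapping, loff_t pos,
			    loff_t count)
{
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;

	/* start writeout, then wait on the same (inclusive) byte range */
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = filemap_fdatawait_range(mapping, pos, pos + count - 1);
	return ret;
}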
 
 /**
  * filemap_fdatawait - wait for all under-writeback pages to complete
@@ -476,6 +444,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                if (likely(!error)) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+                       if (PageSwapBacked(page))
+                               __inc_zone_page_state(page, NR_SHMEM);
                        spin_unlock_irq(&mapping->tree_lock);
                } else {
                        page->mapping = NULL;
@@ -556,16 +526,41 @@ static inline void wake_up_page(struct page *page, int bit)
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
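+/**
+ * wait_on_page_bit_async - wait for a bit on a page, optionally asynchronously
+ * @page: the page to wait on
+ * @bit_nr: the page flag bit (PG_*) to wait for
+ * @wait: caller-provided wait bit queue to wait with, or NULL to wait
+ *        synchronously with an on-stack wait queue
+ *
+ * Returns 0 once the bit is clear, otherwise the non-zero error returned
+ * by the wait, which the caller should propagate.
+ */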
+int wait_on_page_bit_async(struct page *page, int bit_nr,
+                          struct wait_bit_queue *wait)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+       int ret = 0;
+
+       if (test_bit(bit_nr, &page->flags)) {
+               DEFINE_WAIT_BIT(stack_wait, &page->flags, bit_nr);
+               int (*fn)(void *) = sync_page;
 
-       if (test_bit(bit_nr, &page->flags))
-               __wait_on_bit(page_waitqueue(page), &wait, sync_page,
+               if (!wait) {
+                       fn = sync_page;
+                       wait = &stack_wait;
+               } else {
+                       fn = sync_page_killable;
+                       wait->key.flags = &page->flags;
+                       wait->key.bit_nr = bit_nr;
+               }
+
+               ret = __wait_on_bit(page_waitqueue(page), wait, fn,
                                                        TASK_UNINTERRUPTIBLE);
+       }
+
+       if (ret)
+               printk("%s: ret=%d\n", __func__, ret);
+       return ret;
+}
+EXPORT_SYMBOL(wait_on_page_bit_async);
+
+void wait_on_page_bit(struct page *page, int bit_nr)
+{
+       wait_on_page_bit_async(page, bit_nr, NULL);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
@@ -623,7 +618,7 @@ void end_page_writeback(struct page *page)
 EXPORT_SYMBOL(end_page_writeback);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
+ * __lock_page_async - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
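+ * @wq: wait bit queue to wait with while taking the lock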
  *
  * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
@@ -631,23 +626,17 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void __lock_page(struct page *page)
+int __lock_page_async(struct page *page, struct wait_bit_queue *wq)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
-
-       __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
-                                                       TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(__lock_page);
+       int (*fn)(void *) = sync_page;
 
-int __lock_page_killable(struct page *page)
-{
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       if (!is_sync_wait_bit_queue(wq))
+               fn = sync_page_killable;
 
-       return __wait_on_bit_lock(page_waitqueue(page), &wait,
-                                       sync_page_killable, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+       return __wait_on_bit_lock(page_waitqueue(page), wq, fn,
+                                                       TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_page_async);
 
 /**
  * __lock_page_nosync - get a lock on the page, without calling sync_page()
@@ -704,24 +693,22 @@ repeat:
 }
 EXPORT_SYMBOL(find_get_page);
 
-/**
- * find_lock_page - locate, pin and lock a pagecache page
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Locates the desired pagecache page, locks it, increments its reference
- * count and returns its address.
- *
- * Returns zero if the page was not present. find_lock_page() may sleep.
- */
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
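+/**
+ * find_lock_page_async - locate, pin and lock a pagecache page, async capable
+ * @mapping: the address_space to search
+ * @offset: the page index
+ * @wait: wait bit queue to use for locking the page, or NULL to block
+ *
+ * Like find_lock_page(), but the page lock is taken with lock_page_async().
+ * If the lock cannot be obtained, the page reference is dropped and an
+ * ERR_PTR() is returned instead of the page.
+ */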
+struct page *find_lock_page_async(struct address_space *mapping, pgoff_t offset,
+                                 struct wait_bit_queue *wait)
 {
        struct page *page;
 
 repeat:
        page = find_get_page(mapping, offset);
        if (page) {
-               lock_page(page);
+               int ret;
+
+               ret = lock_page_async(page, wait);
+               if (ret) {
+                       page_cache_release(page);
+                       page = ERR_PTR(ret);
+                       goto out;
+               }
                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
@@ -730,8 +717,26 @@ repeat:
                }
                VM_BUG_ON(page->index != offset);
        }
+out:
        return page;
 }
+EXPORT_SYMBOL(find_lock_page_async);
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Locates the desired pagecache page, locks it, increments its reference
+ * count and returns its address.
+ *
+ * Returns zero if the page was not present. find_lock_page() may sleep.
+ */
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
+{
+       return find_lock_page_async(mapping, offset, NULL);
+}
 EXPORT_SYMBOL(find_lock_page);
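An async-aware caller passes the task's wait queue and must check for an ERR_PTR() return instead of relying on a sleeping lock, which is exactly what grab_cache_page_write_begin() does further down. A minimal hypothetical helper (the name is illustrative) would look like:

static int lock_cached_page(struct address_space *mapping, pgoff_t index,
			    struct page **pagep)
{
	struct page *page;

	page = find_lock_page_async(mapping, index, current->io_wait);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. the lock could not be taken without blocking */
	if (!page)
		return -ENOENT;		/* not in the page cache */
	*pagep = page;			/* locked, with a reference held */
	return 0;
}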
 
 /**
@@ -1030,6 +1035,7 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
        pgoff_t last_index;
        pgoff_t prev_index;
        unsigned long offset;      /* offset into pagecache page */
+       unsigned long foo;
        unsigned int prev_offset;
        int error;
 
@@ -1046,6 +1052,7 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
                unsigned long nr, ret;
 
                cond_resched();
+               foo = index;
 find_page:
                page = find_get_page(mapping, index);
                if (!page) {
@@ -1138,9 +1145,11 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               error = lock_page_killable(page);
-               if (unlikely(error))
+               error = lock_page_async(page, current->io_wait);
+               if (unlikely(error)) {
+                       //WARN(1, "%s: %d on index %lu, page %p\n", current->comm, error, foo, page);
                        goto readpage_error;
+               }
 
 page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
@@ -1169,7 +1178,7 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       error = lock_page_killable(page);
+                       error = lock_page_async(page, current->io_wait);
                        if (unlikely(error))
                                goto readpage_error;
                        if (!PageUptodate(page)) {
@@ -1788,11 +1797,16 @@ struct page *read_cache_page(struct address_space *mapping,
                                void *data)
 {
        struct page *page;
+       int err;
 
        page = read_cache_page_async(mapping, index, filler, data);
        if (IS_ERR(page))
                goto out;
-       wait_on_page_locked(page);
+       err = wait_on_page_locked_async(page, current->io_wait);
+       if (err) {
+               page = ERR_PTR(err);
+               goto out;
+       }
        if (!PageUptodate(page)) {
                page_cache_release(page);
                page = ERR_PTR(-EIO);
@@ -2167,20 +2181,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                }
                *ppos = end;
        }
-
-       /*
-        * Sync the fs metadata but not the minor inode changes and
-        * of course not the data as we did direct DMA for the IO.
-        * i_mutex is held, which protects generic_osync_inode() from
-        * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
-        */
 out:
-       if ((written >= 0 || written == -EIOCBQUEUED) &&
-           ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-               if (err < 0)
-                       written = err;
-       }
        return written;
 }
 EXPORT_SYMBOL(generic_file_direct_write);
@@ -2198,8 +2199,8 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
        if (flags & AOP_FLAG_NOFS)
                gfp_notmask = __GFP_FS;
 repeat:
-       page = find_lock_page(mapping, index);
-       if (likely(page))
+       page = find_lock_page_async(mapping, index, current->io_wait);
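+       /* NULL if not cached, a locked page, or an ERR_PTR() from the lock */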
+       if (page || IS_ERR(page))
                return page;
 
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
@@ -2312,8 +2313,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       const struct address_space_operations *a_ops = mapping->a_ops;
-       struct inode *inode = mapping->host;
        ssize_t status;
        struct iov_iter i;
 
@@ -2323,16 +2322,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        if (likely(status >= 0)) {
                written += status;
                *ppos = pos + status;
-
-               /*
-                * For now, when the user asks for O_SYNC, we'll actually give
-                * O_DSYNC
-                */
-               if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-                       if (!a_ops->writepage || !is_sync_kiocb(iocb))
-                               status = generic_osync_inode(inode, mapping,
-                                               OSYNC_METADATA|OSYNC_DATA);
-               }
        }
        
        /*
@@ -2348,9 +2337,27 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
-static ssize_t
-__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t *ppos)
+/**
+ * __generic_file_aio_write - write data to a file
+ * @iocb:      IO state structure (file, offset, etc.)
+ * @iov:       vector with data to write
+ * @nr_segs:   number of segments in the vector
+ * @ppos:      position where to write
+ *
+ * This function does all the work needed for actually writing data to a
+ * file. It does all basic checks, removes SUID from the file, updates
+ * modification times and calls proper subroutines depending on whether we
+ * do direct IO or a standard buffered write.
+ *
+ * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * object which does not need locking at all.
+ *
+ * This function does *not* take care of syncing data in case of O_SYNC write.
+ * A caller has to handle it. This is mainly due to the fact that we want to
+ * avoid syncing under i_mutex.
+ */
+ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                                unsigned long nr_segs, loff_t *ppos)
 {
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
@@ -2447,51 +2454,37 @@ out:
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
+EXPORT_SYMBOL(__generic_file_aio_write);
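Because __generic_file_aio_write() now leaves O_SYNC handling to its caller, a filesystem that does its own locking (a block-device style object that needs no i_mutex, per the comment above) is expected to call generic_write_sync() itself, mirroring generic_file_aio_write() below. A hedged sketch with illustrative names:

static ssize_t example_dev_aio_write(struct kiocb *iocb, const struct iovec *iov,
				     unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	/* no i_mutex needed for this kind of object */
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	return ret;
}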
 
-ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       ssize_t ret;
-
-       BUG_ON(iocb->ki_pos != pos);
-
-       ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
-                       &iocb->ki_pos);
-
-       if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               ssize_t err;
-
-               err = sync_page_range_nolock(inode, mapping, pos, ret);
-               if (err < 0)
-                       ret = err;
-       }
-       return ret;
-}
-EXPORT_SYMBOL(generic_file_aio_write_nolock);
-
+/**
+ * generic_file_aio_write - write data to a file
+ * @iocb:      IO state structure
+ * @iov:       vector with data to write
+ * @nr_segs:   number of segments in the vector
+ * @pos:       position in file where to write
+ *
+ * This is a wrapper around __generic_file_aio_write() to be used by most
+ * filesystems. It takes care of syncing the file in case of an O_SYNC write
+ * and acquires i_mutex as needed.
+ */
 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
+       struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
        mutex_lock(&inode->i_mutex);
-       ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
-                       &iocb->ki_pos);
+       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
-       if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+       if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;
 
-               err = sync_page_range(inode, mapping, pos, ret);
-               if (err < 0)
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0 && ret > 0)
                        ret = err;
        }
        return ret;