}
extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
-extern int __lock_page_wq(struct page *page, struct wait_bit_queue *);
+extern int __lock_page_async(struct page *page, struct wait_bit_queue *);
extern void unlock_page(struct page *page);
static inline void __set_page_locked(struct page *page)
__lock_page(page);
}
-/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals. It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
- */
-static inline int lock_page_killable(struct page *page)
-{
- might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
-}
-
/*
* lock_page_nosync should only be used if we can't pin the page's inode.
* Doesn't play quite so well with block device plugging.
__lock_page_nosync(page);
}
-static inline int lock_page_wq(struct page *page, struct wait_bit_queue *wq)
+/*
+ * This is like lock_page(), except that it won't schedule away waiting for
+ * the page to become unlocked by IO. Instead it registers a callback in
+ * the wait queue and returns -EIOCBRETRY. This only happens if the current
+ * process has registered an "async" wait queue; if it hasn't, this blocks
+ * just like lock_page(). In either case the wait may be terminated by a
+ * fatal signal, in which case -EINTR is returned even though the page
+ * isn't locked yet.
+ */
+static inline int lock_page_async(struct page *page)
{
+ might_sleep();
+
if (!trylock_page(page)) {
+ struct wait_bit_queue *wq;
DEFINE_WAIT_BIT(wq_stack, &page->flags, PG_locked);
+ wq = current->io_wait;
if (!wq)
wq = &wq_stack;
- return __lock_page_wq(page, wq);
+ return __lock_page_async(page, wq);
}
return 0;
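
For callers the contract is: 0 means the page is now locked (possibly after
blocking), -EIOCBRETRY means the async callback was armed and the operation
must be retried once the wakeup fires, and -EINTR means a fatal signal ended
the wait. A minimal caller sketch under those assumptions (the helper name
wait_and_use_page and its error handling are illustrative, not part of the
patch):

	#include <linux/pagemap.h>

	/* Illustrative only: not part of the patch. */
	static int wait_and_use_page(struct page *page)
	{
		int error;

		error = lock_page_async(page);
		if (error) {
			/*
			 * -EIOCBRETRY: a wakeup callback is armed; back out
			 * and retry when it fires.  -EINTR: fatal signal.
			 */
			return error;
		}

		/* Locked; same rules as after lock_page() from here on. */
		unlock_page(page);
		return 0;
	}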
mem_cgroup_uncharge_cache_page(page);
}
-static int sync_page_no_sched(void *word)
+static int sync_page(void *word)
{
struct address_space *mapping;
struct page *page;
if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
mapping->a_ops->sync_page(page);
+ if (!in_aio(current))
+ io_schedule();
+
return 0;
}
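
in_aio() is not defined in this hunk; it comes from the AIO-retry
infrastructure this series builds on. Assuming a task counts as "in AIO"
exactly when it has registered an async wait queue, a plausible shape is:

	#include <linux/sched.h>

	/* Assumption: sketched from context, not taken from this patch. */
	static inline int in_aio(struct task_struct *tsk)
	{
		return tsk->io_wait != NULL;
	}

With that reading, sync_page() still kicks the backing device via
->sync_page(), but only synchronous callers actually sleep in io_schedule();
async callers return to their retry machinery instead.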
-static int sync_page(void *word)
+static int sync_page_killable(void *word)
{
- int ret = sync_page_no_sched(word);
+	int ret = sync_page(word);
+
- io_schedule();
- return ret;
-}
+ if (!ret && fatal_signal_pending(current))
+ ret = -EINTR;
-static int sync_page_killable(void *word)
-{
- sync_page(word);
- return fatal_signal_pending(current) ? -EINTR : 0;
+
+	return ret;
}
/**
int (*fn)(void *) = sync_page;
if (!is_sync_wait(&wq->wait)) {
- fn = sync_page_no_sched;
+ fn = sync_page_killable;
ret = -EIOCBRETRY;
}
}
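
is_sync_wait() is also not shown here; it tells a waiter bound to a task (a
normal sleeper) apart from an async one. One plausible definition, modeled
on the old AIO-retry wait.h and labeled as an assumption:

	/*
	 * Assumption: a sync wait is either no explicit wait entry at all,
	 * or one whose ->private is the task to wake; async entries carry
	 * a callback instead.
	 */
	#define is_sync_wait(wait)	(!(wait) || ((wait)->private == current))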
EXPORT_SYMBOL(end_page_writeback);
-int __lock_page_wq(struct page *page, struct wait_bit_queue *wq)
-{
- if (wq) {
- wq->key.flags = &page->flags;
- wq->key.bit_nr = PG_locked;
- }
-
- return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_no_sched,
- TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(__lock_page_wq);
-
/**
* __lock_page - get a lock on the page, assuming we need to sleep to get it
* @page: the page to lock
}
EXPORT_SYMBOL(__lock_page);
-int __lock_page_killable(struct page *page)
+int __lock_page_async(struct page *page, struct wait_bit_queue *wq)
{
- DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+ if (wq) {
+ wq->key.flags = &page->flags;
+ wq->key.bit_nr = PG_locked;
+ }
- return __wait_on_bit_lock(page_waitqueue(page), &wait,
- sync_page_killable, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+ return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_killable,
+ TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_page_async);
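
__lock_page_async() only fills in the wait_bit_key; what makes the wait
"async" is the wake function installed in the embedded wait_queue_t. A
sketch of such a setup, modeled on the stock wake_bit_function() (the retry
kick and my_* names are hypothetical; the waitqueue calls are existing API):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static struct wait_bit_queue my_io_wait;

	/* Modeled on wake_bit_function(); the retry kick is hypothetical. */
	static int my_io_wake(wait_queue_t *wait, unsigned mode, int sync,
			      void *arg)
	{
		struct wait_bit_key *key = arg;
		struct wait_bit_queue *wq =
			container_of(wait, struct wait_bit_queue, wait);

		/* Hashed waitqueue: ignore wakeups for other pages/bits. */
		if (wq->key.flags != key->flags ||
		    wq->key.bit_nr != key->bit_nr ||
		    test_bit(key->bit_nr, key->flags))
			return 0;

		list_del_init(&wait->task_list);
		/* ... queue a retry of the blocked operation here ... */
		return 1;
	}

	/* On entering async context: */
	init_waitqueue_func_entry(&my_io_wait.wait, my_io_wake);
	current->io_wait = &my_io_wait;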
/**
* __lock_page_nosync - get a lock on the page, without calling sync_page()
page_not_up_to_date:
/* Get exclusive access to the page ... */
- error = lock_page_wq(page, current->io_wait);
+ error = lock_page_async(page);
if (unlikely(error))
goto readpage_error;
}
if (!PageUptodate(page)) {
- error = lock_page_wq(page, current->io_wait);
+ error = lock_page_async(page);
if (unlikely(error))
goto readpage_error;
if (!PageUptodate(page)) {