extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
+extern int __lock_page_wq(struct page *page, struct wait_bit_queue *);
extern void unlock_page(struct page *page);
static inline void __set_page_locked(struct page *page)
if (!trylock_page(page))
__lock_page_nosync(page);
}
+
+/*
+ * lock_page_wq - lock a page, using @wq to wait if the lock is contended.
+ *
+ * Fast path: trylock_page() takes PG_locked without sleeping.  On
+ * contention we fall back to __lock_page_wq(); for an async
+ * wait_bit_queue (NULL ->private, see is_sync_wait()) that path
+ * queues @wq and returns -EIOCBRETRY instead of blocking.
+ * Returns 0 once the page lock is held.
+ */
+static inline int lock_page_wq(struct page *page, struct wait_bit_queue *wq)
+{
+ if (!trylock_page(page))
+ return __lock_page_wq(page, wq);
+
+ return 0;
+}
/*
* This is exported only for wait_on_page_locked/wait_on_page_writeback.
wait_on_page_bit(page, PG_locked);
}
+extern int wait_on_page_bit_wq(struct page *, int, struct wait_bit_queue *);
+
+/*
+ * wait_on_page_locked_wq - wait for PG_locked to clear, async-aware.
+ *
+ * If the page is locked, wait on PG_locked through @wait; with an
+ * async wait_bit_queue the wait queues @wait and returns -EIOCBRETRY
+ * rather than sleeping.  Returns 0 immediately when the page is
+ * already unlocked.
+ */
+static inline int wait_on_page_locked_wq(struct page *page,
+ struct wait_bit_queue *wait)
+{
+ if (PageLocked(page))
+ return wait_on_page_bit_wq(page, PG_locked, wait);
+
+ return 0;
+}
+
/*
* Wait for a page to complete writeback
*/
__ret; \
})
+/*
+ * is_sync_wait - distinguish synchronous waiters from async ones.
+ *
+ * A waiter with a non-NULL ->private is treated as synchronous and
+ * may be put to sleep (set_current_state()/schedule()); async waiters
+ * are presumably initialized with ->private == NULL — TODO confirm at
+ * the wait-queue init sites.
+ */
+static inline int is_sync_wait(wait_queue_t *wait)
+{
+ return wait->private != NULL;
+}
+
/*
* Must be called with the spinlock in the wait_queue_head_t held.
*/
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
- set_current_state(state);
+ if (is_sync_wait(wait))
+ set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue_tail(q, wait);
- set_current_state(state);
+ if (is_sync_wait(wait))
+ set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
do {
prepare_to_wait(wq, &q->wait, mode);
- if (test_bit(q->key.bit_nr, q->key.flags))
+ if (test_bit(q->key.bit_nr, q->key.flags)) {
ret = (*action)(q->key.flags);
+ if (ret)
+ break;
+ }
+ if (!is_sync_wait(&q->wait))
+ ret = -EIOCBRETRY;
} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
finish_wait(wq, &q->wait);
return ret;
if (!test_bit(q->key.bit_nr, q->key.flags))
continue;
ret = action(q->key.flags);
- if (!ret)
- continue;
+ if (!ret) {
+ if (!is_sync_wait(&q->wait))
+ ret = -EIOCBRETRY;
+ else
+ continue;
+ }
abort_exclusive_wait(wq, &q->wait, mode, &q->key);
return ret;
} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
mem_cgroup_uncharge_cache_page(page);
}
-static int sync_page(void *word)
+static int sync_page_no_sched(void *word)
{
struct address_space *mapping;
struct page *page;
mapping = page_mapping(page);
if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
mapping->a_ops->sync_page(page);
- io_schedule();
+
return 0;
}
+/*
+ * sync_page - kick the backing store for @word's page, then sleep.
+ *
+ * Synchronous variant: performs the a_ops->sync_page() poke via
+ * sync_page_no_sched() and then io_schedule()s, blocking until woken.
+ * Propagates sync_page_no_sched()'s return value.
+ */
+static int sync_page(void *word)
+{
+ int ret = sync_page_no_sched(word);
+
+ io_schedule();
+ return ret;
+}
+
static int sync_page_killable(void *word)
{
sync_page(word);
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
+/*
+ * wait_on_page_bit_wq - wait for a page flag bit to clear, sync or async.
+ * @page:   page whose ->flags bit we wait on
+ * @bit_nr: bit number to wait for (e.g. PG_locked)
+ * @wq:     caller-provided wait_bit_queue entry
+ *
+ * Synchronous waiters (wq->wait.private set) block in __wait_on_bit()
+ * with the scheduling sync_page() action and return 0 when the bit
+ * clears.  Async waiters use sync_page_no_sched() so no io_schedule()
+ * happens, and -EIOCBRETRY is returned so the caller can retry after
+ * the queued wakeup fires.  Returns 0 at once if the bit is clear.
+ */
+int wait_on_page_bit_wq(struct page *page, int bit_nr,
+ struct wait_bit_queue *wq)
+{
+ int ret = 0;
+
+ if (test_bit(bit_nr, &page->flags)) {
+ int (*fn)(void *) = sync_page;
+
+ if (!is_sync_wait(&wq->wait)) {
+ fn = sync_page_no_sched;
+ ret = -EIOCBRETRY;
+ }
+
+ __wait_on_bit(page_waitqueue(page), wq, fn,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(wait_on_page_bit_wq);
+
void wait_on_page_bit(struct page *page, int bit_nr)
{
 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
-	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
-				TASK_UNINTERRUPTIBLE);
+	/*
+	 * Reuse the wq-aware helper with an on-stack DEFINE_WAIT_BIT
+	 * entry; that waiter is synchronous, so this blocks as before.
+	 */
+	wait_on_page_bit_wq(page, bit_nr, &wait);
}
EXPORT_SYMBOL(wait_on_page_bit);
+
/**
* add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
* @page: Page defining the wait queue of interest
}
EXPORT_SYMBOL(end_page_writeback);
+/*
+ * __lock_page_wq - slow path of lock_page_wq().
+ *
+ * Waits on PG_locked via __wait_on_bit_lock() with the non-scheduling
+ * sync_page_no_sched() action; for an async wait_bit_queue the wait
+ * loop hands back -EIOCBRETRY instead of blocking, and 0 is returned
+ * once the lock bit has been taken.
+ */
+int __lock_page_wq(struct page *page, struct wait_bit_queue *wq)
+{
+ return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_no_sched,
+ TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_page_wq);
+
/**
* __lock_page - get a lock on the page, assuming we need to sleep to get it
* @page: the page to lock