* context.
*/
BUG_ON(current->io_wait != NULL);
- current->io_wait = &iocb->ki_wait;
+ current->io_wait = &iocb->ki_wq;
ret = retry(iocb);
current->io_wait = NULL;
if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
- BUG_ON(!list_empty(&iocb->ki_wait.task_list));
+ BUG_ON(!list_empty(&iocb->ki_wq.wait.task_list));
aio_complete(iocb, ret, 0);
}
out:
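
Why this is sufficient to make the whole call chain non-blocking for the duration of retry(): the queue entry installed in current->io_wait is initialized with init_waitqueue_func_entry() (see below), which leaves ->private NULL, and the existing is_sync_wait() test in <linux/wait.h> keys off exactly that:

	/*
	 * Used to distinguish between sync and async io wait context:
	 * sync i/o typically specifies a NULL wait queue entry or a wait
	 * queue entry bound to a task (current task) to wake up.
	 * aio specifies a wait queue entry with an async notification
	 * callback routine, not associated with any task.
	 */
	#define is_sync_wait(wait)	(!(wait) || ((wait)->private))

prepare_to_wait() and friends consult this and skip setting the task state for async waiters, which is what lets the sleeping primitives return -EIOCBRETRY instead of blocking.
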
* this is where we let go so that a subsequent
* "kick" can start the next iteration
*/
+ iocb->ki_retry = retry;
/* will make __queue_kicked_iocb succeed from here on */
INIT_LIST_HEAD(&iocb->ki_run_list);
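
For context, the helper named in the comment, approximately as it reads in fs/aio.c of this era (illustrative, not part of this patch):

	/* Queue a kicked iocb for retry, but only if it is not
	 * already on the run list, i.e. not being run right now. */
	static inline int __queue_kicked_iocb(struct kiocb *iocb)
	{
		struct kioctx *ctx = iocb->ki_ctx;

		assert_spin_locked(&ctx->ctx_lock);

		if (list_empty(&iocb->ki_run_list)) {
			list_add_tail(&iocb->ki_run_list, &ctx->run_list);
			return 1;
		}
		return 0;
	}

Re-initializing ki_run_list above is what makes this list_empty() test succeed again for the next kick.
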
* than retry has happened before we could queue the iocb. This also
* means that the retry could have completed and freed our iocb, no
* good. */
- BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
+ BUG_ON(!list_empty(&iocb->ki_wq.wait.task_list));
spin_lock_irqsave(&ctx->ctx_lock, flags);
/* set this inside the lock so that we can't race with aio_run_iocb()
* are nested inside ioctx lock (i.e. ctx->wait)
*/
static int aio_wake_function(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
+ int sync, void *arg)
{
- struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
+ struct wait_bit_key *key = arg;
+ struct wait_bit_queue *wb =
+ 	container_of(wait, struct wait_bit_queue, wait);
+ struct kiocb *iocb = container_of(wb, struct kiocb, ki_wq);
+
+ if (wb->key.flags != key->flags || wb->key.bit_nr != key->bit_nr ||
+ test_bit(key->bit_nr, key->flags))
+ return 0;
list_del_init(&wait->task_list);
kick_iocb(iocb);
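
The three-way filter above mirrors wake_bit_function() in kernel/wait.c, and it works because the unlock side publishes a wait_bit_key naming the word and bit that changed, roughly (mm/filemap.c and kernel/wait.c of this era):

	void wake_up_page(struct page *page, int bit)
	{
		__wake_up_bit(page_waitqueue(page), &page->flags, bit);
	}

	void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
	{
		struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

		if (waitqueue_active(wq))
			__wake_up(wq, TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE,
				  1, &key);
	}

So the key handed to aio_wake_function() names (&page->flags, PG_locked); wakeups for other bits, or for a page that is still locked, are ignored and the iocb is not kicked spuriously.
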
req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
req->ki_opcode = iocb->aio_lio_opcode;
- init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
- INIT_LIST_HEAD(&req->ki_wait.task_list);
+ init_waitqueue_func_entry(&req->ki_wq.wait, aio_wake_function);
+ INIT_LIST_HEAD(&req->ki_wq.wait.task_list);
ret = aio_setup_iocb(req);
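
Note that init_waitqueue_func_entry() (from <linux/wait.h>, unchanged by this patch) deliberately leaves ->private NULL, which is precisely what makes this entry fail the is_sync_wait() test quoted earlier:

	static inline void init_waitqueue_func_entry(wait_queue_t *q,
						     wait_queue_func_t func)
	{
		q->flags = 0;
		q->private = NULL;
		q->func = func;
	}
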
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
- wait_queue_t ki_wait;
+ struct wait_bit_queue ki_wq;
loff_t ki_pos;
void *private;
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
- init_wait((&(x)->ki_wait)); \
+ init_wait((&(x)->ki_wq.wait)); \
} while (0)
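
For reference, the type replacing the bare wait_queue_t, as defined in <linux/wait.h>; the embedded key is what __lock_page_wq() fills in below, and wait is the entry that actually hangs off the page's waitqueue:

	struct wait_bit_key {
		void *flags;
		int bit_nr;
	};

	struct wait_bit_queue {
		struct wait_bit_key key;
		wait_queue_t wait;
	};

Embedding the whole wait_bit_queue grows struct kiocb slightly, but it gives the generic bit-waiting code and aio_wake_function() one shared identity for "this iocb is waiting on PG_locked of this page".
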
#define AIO_RING_MAGIC 0xa10a10a1
static inline void exit_aio(struct mm_struct *mm) { }
#endif /* CONFIG_AIO */
-#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-
static inline struct kiocb *list_kiocb(struct list_head *h)
{
return list_entry(h, struct kiocb, ki_list);
int __lock_page_wq(struct page *page, struct wait_bit_queue *wq)
{
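+ /* Async callers pass their iocb's wait_bit_queue here; record which
+ * word/bit we are waiting on so wake-side key matching (see
+ * aio_wake_function) can skip wakeups meant for other page bits. */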
+ if (wq) {
+ wq->key.flags = &page->flags;
+ wq->key.bit_nr = PG_locked;
+ }
+
return __wait_on_bit_lock(page_waitqueue(page), wq, sync_page_no_sched,
TASK_UNINTERRUPTIBLE);
}
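
The callers below use lock_page_wq(), whose fast path sits outside this hunk. A plausible wrapper, modeled on lock_page() of the same era (the name is taken from the callers; the body here is an assumption, with TestSetPageLocked() as the trylock primitive of the time):

	static inline int lock_page_wq(struct page *page,
				       struct wait_bit_queue *wq)
	{
		might_sleep();
		if (TestSetPageLocked(page))
			return __lock_page_wq(page, wq); /* 0 or -EIOCBRETRY */
		return 0;
	}

With wq == NULL the slow path presumably falls back to an on-stack wait inside __wait_on_bit_lock(), so synchronous read(2) callers keep their current blocking behaviour.
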
page_not_up_to_date:
/* Get exclusive access to the page ... */
- error = lock_page_killable(page);
+ error = lock_page_wq(page, current->io_wait);
if (unlikely(error))
goto readpage_error;
}
if (!PageUptodate(page)) {
- error = lock_page_killable(page);
+ error = lock_page_wq(page, current->io_wait);
if (unlikely(error))
goto readpage_error;
if (!PageUptodate(page)) {
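
Note that error at these two sites used to be 0 or -EINTR from lock_page_killable(); with current->io_wait armed it can now also be -EIOCBRETRY. The readpage_error exit (unchanged, roughly as in mm/filemap.c of this era) simply forwards it:

	readpage_error:
		/* report the error, which may now be -EIOCBRETRY as
		 * well as a real read error */
		desc->error = error;
		page_cache_release(page);
		goto out;

That is how the retry status travels back up to the aio_run_iocb() hunk at the top of this patch, where anything other than -EIOCBRETRY/-EIOCBQUEUED completes the iocb.
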