* queue this on the run list yet)
*/
iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
+ iocb->ki_retry = NULL;
spin_unlock_irq(&ctx->ctx_lock);
/* Quit retrying if the i/o has been cancelled */
	/*
	 * Now we are all set to call the retry method in async
* context.
*/
- BUG_ON(current->io_wait != NULL);
+ BUG_ON(current->io_wait);
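+	/* Route page waits issued inside retry() to this iocb's queue. */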
current->io_wait = &iocb->ki_wq;
ret = retry(iocb);
current->io_wait = NULL;
out:
spin_lock_irq(&ctx->ctx_lock);
- if (-EIOCBRETRY == ret) {
+ if (ret == -EIOCBRETRY) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go, queue
		 * the work so a later kick runs the next iteration.
		 */
aio_queue_work(ctx);
}
}
+
return ret;
}
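For illustration (not part of the patch): a minimal sketch of how a
->ki_retry method is assumed to cooperate with the convention above.
While retry() runs, current->io_wait points at the iocb's wait queue, so
a blocking primitive can park a waiter there and return -EIOCBRETRY
instead of sleeping. do_more_io() is a hypothetical helper; aio_complete()
is the usual completion path.

static ssize_t example_retry(struct kiocb *iocb)
{
	ssize_t ret;

	/* Continue the transfer from where the last attempt left off. */
	ret = do_more_io(iocb);		/* hypothetical helper */
	if (ret == -EIOCBRETRY)
		return ret;		/* waiter armed; we get re-kicked */

	aio_complete(iocb, ret, 0);	/* done: report the final result */
	return ret;
}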
spin_lock_irq(&ctx->ctx_lock);
aio_run_iocb(req);
+#if 0
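+	/* XXX: run-list draining disabled for now */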
if (!list_empty(&ctx->run_list)) {
/* drain the run list */
while (__aio_run_iocbs(ctx))
;
}
+#endif
spin_unlock_irq(&ctx->ctx_lock);
aio_put_req(req); /* drop extra ref to req */
return 0;
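For illustration (not part of the patch): the hunks above never show how
a parked iocb gets re-run. Presumably a wake callback on the queue behind
current->io_wait re-kicks it, along the lines of 2.6-era mainline
aio_wake_function(). ki_wait below is the mainline field name; the patch
above uses ki_wq, which is assumed to play the same role.

static int example_wake(wait_queue_t *wait, unsigned mode,
			int sync, void *key)
{
	struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

	list_del_init(&wait->task_list);	/* one-shot waiter */
	kick_iocb(iocb);	/* queue the iocb so ->ki_retry runs again */
	return 1;
}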
TASK_UNINTERRUPTIBLE);
}
+	if (ret)
+		printk(KERN_DEBUG "%s: ret=%d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL(wait_on_page_bit_async);
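For illustration (not part of the patch): a sketch of how a caller is
assumed to use the async lock, following the read-path hunk below. With
current->io_wait set by aio_run_iocb(), lock_page_async() is expected to
return -EIOCBRETRY rather than block; example_read_page() and its error
handling are hypothetical.

static int example_read_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);
	int err;

	if (!page)
		return -ENOENT;		/* simplified: no allocation/readahead */

	err = lock_page_async(page, current->io_wait);
	if (err) {
		/* typically -EIOCBRETRY: drop the ref, retry on wake-up */
		page_cache_release(page);
		return err;
	}
	/* ... page is locked; check ->mapping and PageUptodate here ... */
	unlock_page(page);
	page_cache_release(page);
	return 0;
}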
pgoff_t last_index;
pgoff_t prev_index;
unsigned long offset; /* offset into pagecache page */
+	unsigned long dbg_index;	/* debug: index saved for the WARN below */
unsigned int prev_offset;
int error;
unsigned long nr, ret;
cond_resched();
+	dbg_index = index;
find_page:
page = find_get_page(mapping, index);
if (!page) {
page_not_up_to_date:
/* Get exclusive access to the page ... */
error = lock_page_async(page, current->io_wait);
- if (unlikely(error))
+ if (unlikely(error)) {
+			/* WARN(1, "%s: %d on index %lu, page %p\n",
+				current->comm, error, dbg_index, page); */
goto readpage_error;
+ }
page_not_up_to_date_locked:
/* Did it get truncated before we got the lock? */