/*
 * Native Windows async IO engine
 * Copyright (C) 2012 Bruce Cran <bruce@cran.org.uk>
 */
#include <stdio.h>
OVERLAPPED o;
struct io_u *io_u;
BOOL io_complete;
- BOOL io_free;
+ BOOL io_free;
};
struct windowsaio_data {
}
hKernel32Dll = GetModuleHandle("kernel32.dll");
- wd->pCancelIoEx = GetProcAddress(hKernel32Dll, "CancelIoEx");
-
+ wd->pCancelIoEx = (CANCELIOEX)GetProcAddress(hKernel32Dll, "CancelIoEx");
td->io_ops->data = wd;
+
return rc;
}
wd = td->io_ops->data;
if (wd != NULL) {
- wd->iothread_running = FALSE;
- WaitForSingleObject(wd->iothread, INFINITE);
+ wd->iothread_running = FALSE;
+ WaitForSingleObject(wd->iothread, INFINITE);
CloseHandle(wd->iothread);
CloseHandle(wd->iocomplete_event);
if (td->o.sync_io)
flags |= FILE_FLAG_WRITE_THROUGH;
+ /*
+ * Inform Windows whether we're going to be doing sequential or
+ * random io so it can tune the Cache Manager
+ */
if (td->o.td_ddir == TD_DDIR_READ ||
td->o.td_ddir == TD_DDIR_WRITE)
flags |= FILE_FLAG_SEQUENTIAL_SCAN;
else
access = (GENERIC_READ | GENERIC_WRITE);
- if (td->o.create_on_open > 0)
+ if (td->o.create_on_open)
openmode = OPEN_ALWAYS;
else
openmode = OPEN_EXISTING;
if (f->hFile == INVALID_HANDLE_VALUE)
rc = 1;
- /* Only set up the competion port and thread if we're not just
+ /* Only set up the completion port and thread if we're not just
* querying the device size */
- if (!rc && td->io_ops->data != NULL) {
+ if (!rc && td->io_ops->data != NULL) {
struct thread_ctx *ctx;
- struct windowsaio_data *wd;
- hFile = CreateIoCompletionPort(f->hFile, NULL, 0, 0);
+ struct windowsaio_data *wd;
- wd = td->io_ops->data;
+ hFile = CreateIoCompletionPort(f->hFile, NULL, 0, 0);
+ wd = td->io_ops->data;
wd->iothread_running = TRUE;
if (!rc) {
static int fio_windowsaio_close_file(struct thread_data fio_unused *td, struct fio_file *f)
{
int rc = 0;
-
+
dprint(FD_FILE, "fd close %s\n", f->file_name);
if (f->hFile != INVALID_HANDLE_VALUE) {
fov = (struct fio_overlapped*)io_u->engine_data;
if (fov->io_complete) {
- fov->io_complete = FALSE;
+ fov->io_complete = FALSE;
fov->io_free = TRUE;
wd->aio_events[dequeued] = io_u;
dequeued++;
break;
}
- if (dequeued < min) {
+ if (dequeued < min) {
status = WaitForSingleObject(wd->iocomplete_event, mswait);
if (status != WAIT_OBJECT_0 && dequeued > 0)
break;
return dequeued;
}
-static int fio_windowsaio_queue(struct thread_data *td,
- struct io_u *io_u)
+static int fio_windowsaio_queue(struct thread_data *td, struct io_u *io_u)
{
- LPOVERLAPPED lpOvl = NULL;
+ LPOVERLAPPED lpOvl = NULL;
struct windowsaio_data *wd;
DWORD iobytes;
- BOOL success;
+ BOOL success = FALSE;
int index;
int rc = FIO_Q_COMPLETED;
wd = td->io_ops->data;
- for (index = 0; index < td->o.iodepth; index++) {
- if (wd->ovls[index].io_free) {
- wd->ovls[index].io_free = FALSE;
- ResetEvent(wd->ovls[index].o.hEvent);
- break;
- }
- }
+ for (index = 0; index < td->o.iodepth; index++) {
+ if (wd->ovls[index].io_free) {
+ wd->ovls[index].io_free = FALSE;
+ ResetEvent(wd->ovls[index].o.hEvent);
+ break;
+ }
+ }
- assert(index < td->o.iodepth);
+ assert(index < td->o.iodepth);
- lpOvl = &wd->ovls[index].o;
- wd->ovls[index].io_u = io_u;
+ lpOvl = &wd->ovls[index].o;
+ wd->ovls[index].io_u = io_u;
lpOvl->Internal = STATUS_PENDING;
lpOvl->InternalHigh = 0;
lpOvl->Offset = io_u->offset & 0xFFFFFFFF;
lpOvl->OffsetHigh = io_u->offset >> 32;
- lpOvl->Pointer = NULL;
- io_u->engine_data = &wd->ovls[index];
+ io_u->engine_data = &wd->ovls[index];
switch (io_u->ddir) {
- case DDIR_WRITE:
+ case DDIR_WRITE:
success = WriteFile(io_u->file->hFile, io_u->xfer_buf, io_u->xfer_buflen, &iobytes, lpOvl);
break;
case DDIR_READ:
break;
default:
assert(0);
+ break;
}
- if (success || GetLastError() == ERROR_IO_PENDING) {
+ if (success || GetLastError() == ERROR_IO_PENDING)
rc = FIO_Q_QUEUED;
- } else {
+ else {
io_u->error = GetLastError();
io_u->resid = io_u->xfer_buflen;
}
io_u->error = ovl->Internal;
}
- fov->io_complete = TRUE;
+ fov->io_complete = TRUE;
SetEvent(wd->iocomplete_event);
} while (ctx->wd->iothread_running);
/* If we're running on Vista or newer, we can cancel individual IO requests */
if (wd->pCancelIoEx != NULL) {
struct fio_overlapped *ovl = io_u->engine_data;
+
if (!wd->pCancelIoEx(io_u->file->hFile, &ovl->o))
rc = 1;
} else