/*
- * regular read/write sync io engine
+ * mmap engine
+ *
+ * IO engine that reads/writes from files by doing memcpy to/from
+ * a memory mapped region of the file.
*
*/
#include <stdio.h>
#include "../fio.h"
#include "../os.h"
-struct mmapio_data {
- struct io_u *last_io_u;
-};
-
-static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- /*
- * we can only have one finished io_u for sync io, since the depth
- * is always 1
- */
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
-{
- struct mmapio_data *sd = td->io_ops->data;
-
- assert(event == 0);
-
- return sd->last_io_u;
-}
-
-
static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f = io_u->file;
unsigned long long real_off = io_u->offset - f->file_offset;
- struct mmapio_data *sd = td->io_ops->data;
if (io_u->ddir == DDIR_READ)
- memcpy(io_u->buf, f->mmap + real_off, io_u->buflen);
+ memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_WRITE)
- memcpy(f->mmap + real_off, io_u->buf, io_u->buflen);
+ memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_SYNC) {
- if (msync(f->mmap, f->file_size, MS_SYNC))
+ size_t len = (f->io_size + page_size - 1) & ~page_mask;
+
+ if (msync(f->mmap, len, MS_SYNC)) {
io_u->error = errno;
+ td_verror(td, io_u->error, "msync");
+ }
}
/*
* not really direct, but should drop the pages from the cache
*/
- if (td->odirect && io_u->ddir != DDIR_SYNC) {
- if (msync(f->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
+ if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
+ size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
+ unsigned long long off = real_off & ~page_mask;
+
+ if (msync(f->mmap + off, len, MS_SYNC) < 0) {
io_u->error = errno;
- if (madvise(f->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
+ td_verror(td, io_u->error, "msync");
+ }
+ if (madvise(f->mmap + off, len, MADV_DONTNEED) < 0) {
io_u->error = errno;
+ td_verror(td, io_u->error, "madvise");
+ }
}
- if (!io_u->error)
- sd->last_io_u = io_u;
-
- return io_u->error;
+ return FIO_Q_COMPLETED;
}
-static void fio_mmapio_cleanup(struct thread_data *td)
+static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
{
- if (td->io_ops->data) {
- free(td->io_ops->data);
- td->io_ops->data = NULL;
+ int ret, flags;
+
+ ret = generic_open_file(td, f);
+ if (ret)
+ return ret;
+
+ if (td_rw(td))
+ flags = PROT_READ | PROT_WRITE;
+ else if (td_write(td)) {
+ flags = PROT_WRITE;
+
+ if (td->o.verify != VERIFY_NONE)
+ flags |= PROT_READ;
+ } else
+ flags = PROT_READ;
+
+ f->mmap = mmap(NULL, f->io_size, flags, MAP_SHARED, f->fd, f->file_offset);
+ if (f->mmap == MAP_FAILED) {
+ f->mmap = NULL;
+ td_verror(td, errno, "mmap");
+ goto err;
}
-}
-static int fio_mmapio_init(struct thread_data *td)
-{
- struct mmapio_data *sd = malloc(sizeof(*sd));
+ if (file_invalidate_cache(td, f))
+ goto err;
+
+ if (!td_random(td)) {
+ if (madvise(f->mmap, f->io_size, MADV_SEQUENTIAL) < 0) {
+ td_verror(td, errno, "madvise");
+ goto err;
+ }
+ } else {
+ if (madvise(f->mmap, f->io_size, MADV_RANDOM) < 0) {
+ td_verror(td, errno, "madvise");
+ goto err;
+ }
+ }
- sd->last_io_u = NULL;
- td->io_ops->data = sd;
return 0;
+
+err:
+ td->io_ops->close_file(td, f);
+ return 1;
+}
+
+static void fio_mmapio_close(struct thread_data *td,
+			     struct fio_file *f)
+{
+ if (f->mmap) {
+ munmap(f->mmap, f->io_size);
+ f->mmap = NULL;
+ }
+ generic_close_file(td, f);
}
static struct ioengine_ops ioengine = {
.name = "mmap",
.version = FIO_IOOPS_VERSION,
- .init = fio_mmapio_init,
.queue = fio_mmapio_queue,
- .getevents = fio_mmapio_getevents,
- .event = fio_mmapio_event,
- .cleanup = fio_mmapio_cleanup,
- .flags = FIO_SYNCIO | FIO_MMAPIO,
+ .open_file = fio_mmapio_open,
+ .close_file = fio_mmapio_close,
+ .flags = FIO_SYNCIO | FIO_NOEXTEND,
};
static void fio_init fio_mmapio_register(void)