+/*
+ * Point io_u->mmap_data at the mapped bytes for this I/O so that queue()
+ * can memcpy straight to/from the mapping. Reuses the current mapped
+ * window when the request falls inside it, otherwise tears the old
+ * mapping down and establishes a new one.
+ *
+ * Returns 0 on success, an errno-style error otherwise.
+ */
+static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
+{
+	struct fio_file *f = io_u->file;
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
+	unsigned long long io_end = io_u->offset + io_u->buflen;
+
+	/*
+	 * Fast path: the request is fully covered by the current window.
+	 */
+	if (io_u->offset >= fmd->mmap_off &&
+	    io_end <= fmd->mmap_off + fmd->mmap_sz)
+		goto done;
+
+	/*
+	 * The request misses the window; drop any existing mapping before
+	 * building a new one.
+	 */
+	if (fmd->mmap_ptr) {
+		if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0)
+			return errno;
+		fmd->mmap_ptr = NULL;
+	}
+
+	/*
+	 * Try mapping the full range first; if that fails (presumably
+	 * address space pressure — see the helpers), clear the error and
+	 * fall back to a limited window.
+	 */
+	if (fio_mmapio_prep_full(td, io_u)) {
+		int err;
+
+		td_clear_error(td);
+		err = fio_mmapio_prep_limited(td, io_u);
+		if (err)
+			return err;
+	}
+
+done:
+	io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off -
+				f->file_offset;
+	return 0;
+}
+
+/*
+ * Execute one io_u against the mapping set up by fio_mmapio_prep().
+ * All work happens inline, so every request completes synchronously
+ * and this always returns FIO_Q_COMPLETED.
+ */
+static enum fio_q_status fio_mmapio_queue(struct thread_data *td,
+ struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
+
+ fio_ro_check(td, io_u);
+
+ /* reads/writes are plain memcpy against the mapped window */
+ if (io_u->ddir == DDIR_READ)
+ memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
+ else if (io_u->ddir == DDIR_WRITE)
+ memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
+ else if (ddir_sync(io_u->ddir)) {
+ /* sync flushes the entire current mapped window, not just one io_u */
+ if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
+ io_u->error = errno;
+ td_verror(td, io_u->error, "msync");
+ }
+ } else if (io_u->ddir == DDIR_TRIM) {
+ int ret = do_io_u_trim(td, io_u);
+
+ /*
+ * NOTE(review): the check looks inverted but presumably is not —
+ * do_io_u_trim() appears to return 0 on failure (non-zero byte
+ * count on success), so !ret is the error case. Confirm against
+ * its definition.
+ */
+ if (!ret)
+ td_verror(td, io_u->error, "trim");
+ }
+
+
+ /*
+ * not really direct, but should drop the pages from the cache
+ */
+ if (td->o.odirect && ddir_rw(io_u->ddir)) {
+ /* flush this io_u's pages, then hint the kernel to evict them */
+ if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
+ io_u->error = errno;
+ td_verror(td, io_u->error, "msync");
+ }
+ if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
+ io_u->error = errno;
+ td_verror(td, io_u->error, "madvise");
+ }
+ }
+
+ return FIO_Q_COMPLETED;
+}
+
+/*
+ * Per-thread engine setup: reject option combinations that require
+ * page-aligned block sizes when the minimum block size is unaligned,
+ * then size the per-file mapping window.
+ *
+ * Returns 0 on success, 1 on invalid options.
+ */
+static int fio_mmapio_init(struct thread_data *td)
+{
+	struct thread_options *o = &td->o;
+	int needs_page_aligned_bs;
+
+	/* O_DIRECT and periodic fsync/fdatasync all operate on whole pages */
+	needs_page_aligned_bs = o->odirect || o->fsync_blocks ||
+				o->fdatasync_blocks;
+	if (needs_page_aligned_bs && (o->rw_min_bs & page_mask)) {
+		log_err("fio: mmap options dictate a minimum block size of "
+			"%llu bytes\n", (unsigned long long) page_size);
+		return 1;
+	}
+
+	/* split the total mapping budget evenly across the job's files */
+	mmap_map_size = MMAP_TOTAL_SZ / o->nr_files;
+	return 0;
+}
+
+/*
+ * Open the file through the generic helper and attach a zero-initialized
+ * per-file mmap state structure to it.
+ *
+ * Returns 0 on success, non-zero on failure (the file is closed again
+ * if the state allocation fails).
+ */
+static int fio_mmapio_open_file(struct thread_data *td, struct fio_file *f)
+{
+	struct fio_mmap_data *fmd;
+	int err = generic_open_file(td, f);
+
+	if (err)
+		return err;
+
+	fmd = calloc(1, sizeof(*fmd));
+	if (fmd) {
+		FILE_SET_ENG_DATA(f, fmd);
+		return 0;
+	}
+
+	/* allocation failed: undo the open; its result is irrelevant here */
+	(void) generic_close_file(td, f);
+	return 1;
+}
+
+/*
+ * Detach and free the per-file mmap state, then close the file.
+ *
+ * Fix: the previous version freed the state without unmapping a still
+ * active window, leaking the mapping — prep() only munmap()s when it
+ * has to move the window, so one is typically still live at close time.
+ */
+static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
+{
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
+
+	FILE_SET_ENG_DATA(f, NULL);
+	if (fmd) {
+		/* drop any mapping still covering the file */
+		if (fmd->mmap_ptr)
+			munmap(fmd->mmap_ptr, fmd->mmap_sz);
+		free(fmd);
+	}
+	fio_file_clear_partial_mmap(f);
+
+	return generic_close_file(td, f);
+}
+